idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,000
/**
 * Scans the sensor-output buffer for every line matching "&lt;prefix&gt;&lt;n&gt; : &lt;value&gt;"
 * and records each match into the given EventRecord.
 *
 * @param er     record that receives one entry per matched sensor line
 * @param sb     raw tool output to scan
 * @param prefix common sensor-name prefix (e.g. a sensor group name)
 * @return the same EventRecord instance, for call chaining
 */
private EventRecord readGroup ( EventRecord er , StringBuffer sb , String prefix ) {
  // Group 1 captures "<prefix><digits>", group 2 the (optionally signed) reading.
  Pattern groupPattern =
      Pattern.compile(".*(" + prefix + "\\s*\\d*)\\s*:\\s*(\\+?\\d+)", Pattern.MULTILINE);
  Matcher m = groupPattern.matcher(sb);
  while (m.find()) {
    er.set(m.group(1), m.group(2));
  }
  return er;
}
Reads and parses lines that provide the output of a group of sensors with the same functionality .
32,001
/**
 * Return true iff the given path names a checksum file
 * (a hidden file whose name ends in ".crc").
 *
 * @param file path to test
 * @return true when the file name starts with "." and ends with ".crc"
 */
public static boolean isChecksumFile ( Path file ) {
  final String fileName = file.getName();
  if (!fileName.startsWith(".")) {
    return false;
  }
  return fileName.endsWith(".crc");
}
Return true iff file is a checksum file name .
32,002
/**
 * Copies src (under this checksummed FS) to dst on the local disk, and for
 * plain files also transfers the matching ".crc" checksum file when copyCrc
 * is set. Directories are recursed into, applying the same rule per entry.
 *
 * @param src     source path on this file system
 * @param dst     destination path on the local file system
 * @param copyCrc whether to also copy the checksum (CRC) files
 * @throws IOException on any underlying copy/list failure
 */
public void copyToLocalFile ( Path src , Path dst , boolean copyCrc ) throws IOException {
  if ( ! fs . isDirectory ( src ) ) {
    // Plain file: copy the data first.
    fs . copyToLocalFile ( src , dst ) ;
    FileSystem localFs = getLocal ( getConf ( ) ) . getRawFileSystem ( ) ;
    // If dst turned out to be a directory, the data landed at dst/srcName.
    if ( localFs . isDirectory ( dst ) ) {
      dst = new Path ( dst , src . getName ( ) ) ;
    }
    // From here on, dst refers to the local checksum file location.
    dst = getChecksumFile ( dst ) ;
    // Remove any stale local checksum before (maybe) copying a fresh one.
    if ( localFs . exists ( dst ) ) {
      localFs . delete ( dst , true ) ;
    }
    Path checksumFile = getChecksumFile ( src ) ;
    if ( copyCrc && fs . exists ( checksumFile ) ) {
      fs . copyToLocalFile ( checksumFile , dst ) ;
    }
  } else {
    // Directory: recurse into each child, mirroring the layout under dst.
    FileStatus [ ] srcs = listStatus ( src ) ;
    for ( FileStatus srcFile : srcs ) {
      copyToLocalFile ( srcFile . getPath ( ) ,
          new Path ( dst , srcFile . getPath ( ) . getName ( ) ) , copyCrc ) ;
    }
  }
}
The src file is under FS and the dst is on the local disk . Copy it from FS control to the local dst name . If src and dst are directories the copyCrc parameter determines whether to copy CRC files .
32,003
/**
 * Report a checksum error to the file system.
 * This default implementation ignores the report entirely; subclasses that
 * can act on corruption (e.g. move the bad file aside) override it.
 *
 * @param f       the file containing the error
 * @param in      open stream of the data file positioned near the error
 * @param inPos   offset in the data file where the error was detected
 * @param sums    open stream of the checksum file
 * @param sumsPos offset in the checksum file corresponding to the error
 * @return false, meaning nothing was done about the failure
 */
public boolean reportChecksumFailure ( Path f , FSDataInputStream in , long inPos , FSDataInputStream sums , long sumsPos ) {
  return false ;
}
Report a checksum error to the file system .
32,004
/**
 * Recovers a corrupt block of a parity file by re-encoding the corresponding
 * source stripe and writing only the reconstructed parity block to out; the
 * other parity outputs of the stripe are discarded via NullOutputStream.
 *
 * @param fs            file system holding the source file
 * @param srcStat       status of the source file the parity was derived from
 * @param blockSize     block size used for the stripe arithmetic
 * @param parityFile    path of the parity file being repaired (logging only)
 * @param corruptOffset byte offset of the corruption; rounded down to a block
 * @param out           sink that receives the reconstructed parity block
 * @param progress      progress callback for long-running encodes
 * @return CRC32 of the recovered block when a checksum store is configured,
 *         otherwise null
 * @throws IOException if reading the stripe or encoding fails
 */
public CRC32 recoverParityBlockToStream ( FileSystem fs , FileStatus srcStat , long blockSize , Path parityFile , long corruptOffset , OutputStream out , Progressable progress ) throws IOException {
  LOG . info ( "Recovering parity block" + parityFile + ":" + corruptOffset ) ;
  Path srcFile = srcStat . getPath ( ) ;
  // Align the offset to the start of the corrupt block.
  corruptOffset = ( corruptOffset / blockSize ) * blockSize ;
  OutputStream [ ] outs = new OutputStream [ codec . parityLength ] ;
  // Position of the corrupt block within its parity stripe.
  long indexOfCorruptBlockInParityStripe = ( corruptOffset / blockSize ) % codec . parityLength ;
  LOG . info ( "Index of corrupt block in parity stripe: " + indexOfCorruptBlockInParityStripe ) ;
  CRC32 [ ] crcOuts = null ;
  if ( checksumStore != null ) {
    crcOuts = new CRC32 [ codec . parityLength ] ;
  }
  // Only the corrupt block's slot writes to the caller's stream (and gets a
  // CRC accumulator); every other slot is a sink that drops its output.
  for ( int i = 0 ; i < codec . parityLength ; i ++ ) {
    if ( indexOfCorruptBlockInParityStripe == i ) {
      outs [ i ] = out ;
      if ( checksumStore != null ) {
        crcOuts [ i ] = new CRC32 ( ) ;
      }
    } else {
      outs [ i ] = new NullOutputStream ( ) ;
    }
  }
  long stripeIdx = corruptOffset / ( codec . parityLength * blockSize ) ;
  StripeReader sReader = StripeReader . getStripeReader ( codec , conf , blockSize , fs , stripeIdx , srcStat ) ;
  assert sReader . hasNext ( ) == true ;
  InputStream [ ] blocks = sReader . getNextStripeInputs ( ) . getInputs ( ) ;
  LOG . info ( "Starting recovery by using source stripe " + srcFile + ": stripe " + stripeIdx ) ;
  try {
    // Re-encode the whole stripe; only the selected slot reaches 'out'.
    encodeStripe ( blocks , blockSize , outs , crcOuts , progress , false , null ) ;
    if ( checksumStore != null ) {
      return crcOuts [ ( int ) indexOfCorruptBlockInParityStripe ] ;
    } else {
      return null ;
    }
  } finally {
    RaidUtils . closeStreams ( blocks ) ;
  }
}
Recovers a corrupt block in a parity file to a local file .
32,005
/**
 * Encodes one stripe: reads the source blocks in parallel, runs the codec
 * bulk-encode over each buffer-sized chunk, and writes the parity output
 * (optionally accumulating per-output CRCs and source-block checksums).
 *
 * @param blocks            input streams, one per source block of the stripe
 * @param blockSize         number of bytes to encode per block
 * @param outs              parity output streams, one per parity block
 * @param crcOuts           optional CRC accumulators parallel to outs (entries may be null)
 * @param reporter          progress callback, invoked per chunk and per output
 * @param computeSrcChecksum whether the reader should also checksum the source blocks
 * @param errorLocations    if non-null, filled with the failing block indices on read error
 * @throws IOException on read failure (after populating errorLocations) or write failure
 */
void encodeStripe ( InputStream [ ] blocks , long blockSize , OutputStream [ ] outs , CRC32 [ ] crcOuts , Progressable reporter , boolean computeSrcChecksum , List < Integer > errorLocations ) throws IOException {
  // Buffer size must be configured relative to blockSize; see class notes on
  // why bufSize should divide blockSize (avoids reads across block borders).
  configureBuffers ( blockSize ) ;
  int boundedBufferCapacity = 1 ;
  ParallelStreamReader parallelReader = new ParallelStreamReader ( reporter , blocks , bufSize , parallelism , boundedBufferCapacity , blockSize , computeSrcChecksum , outs ) ;
  parallelReader . start ( ) ;
  try {
    for ( long encoded = 0 ; encoded < blockSize ; encoded += bufSize ) {
      ParallelStreamReader . ReadResult readResult = null ;
      try {
        readResult = parallelReader . getReadResult ( ) ;
      } catch ( InterruptedException e ) {
        throw new IOException ( "Interrupted while waiting for read result" ) ;
      }
      IOException readEx = readResult . getException ( ) ;
      if ( readEx != null ) {
        // Report which source blocks failed before propagating the error.
        if ( errorLocations != null ) {
          errorLocations . clear ( ) ;
          for ( int idx : readResult . getErrorIdx ( ) ) {
            errorLocations . add ( idx ) ;
          }
        }
        throw readEx ;
      }
      code . encodeBulk ( readResult . readBufs , writeBufs ) ;
      reporter . progress ( ) ;
      // Final chunk may be shorter than bufSize.
      int toWrite = ( int ) Math . min ( blockSize - encoded , bufSize ) ;
      for ( int i = 0 ; i < codec . parityLength ; i ++ ) {
        outs [ i ] . write ( writeBufs [ i ] , 0 , toWrite ) ;
        if ( crcOuts != null && crcOuts [ i ] != null ) {
          crcOuts [ i ] . update ( writeBufs [ i ] , 0 , toWrite ) ;
        }
        reporter . progress ( ) ;
      }
    }
    if ( computeSrcChecksum ) {
      parallelReader . collectSrcBlocksChecksum ( checksumStore ) ;
    }
  } finally {
    parallelReader . shutdown ( ) ;
  }
}
Wraps around encodeStripeImpl in order to configure buffers. Having buffers of the right size is extremely important. If the buffer size is not a divisor of the block size we may end up reading across block boundaries.
32,006
/**
 * Writes a string to the output with an ASCII fast path: if every char is
 * ASCII, the string is written as a vint length followed by raw single-byte
 * chars; otherwise it falls back to the general writeString encoding.
 * A null string is encoded as the NULL_STRING_LENGTH sentinel.
 *
 * @param out destination
 * @param str string to write; may be null
 * @throws IOException if the underlying write fails
 */
public static void writeStringOpt ( DataOutput out , String str ) throws IOException {
  if ( str == null ) {
    WritableUtils . writeVInt ( out , NULL_STRING_LENGTH ) ;
    return ;
  }
  final int len = str . length ( ) ;
  // Reuse pooled temp arrays to avoid per-call allocation.
  TempArrays ta = UTF8 . getArrays ( len ) ;
  byte [ ] rawBytes = ta . byteArray ;
  char [ ] charArray = ta . charArray ;
  str . getChars ( 0 , len , charArray , 0 ) ;
  boolean ascii = true ;
  for ( int i = 0 ; i < len ; i ++ ) {
    if ( charArray [ i ] > UTF8 . MAX_ASCII_CODE ) {
      // Non-ASCII char found: abandon the fast path.
      ascii = false ;
      break ;
    }
    rawBytes [ i ] = ( byte ) charArray [ i ] ;
  }
  if ( ascii ) {
    WritableUtils . writeVInt ( out , len ) ;
    out . write ( rawBytes , 0 , len ) ;
  } else {
    // Slow path handles full UTF-8 encoding.
    writeString ( out , str ) ;
  }
}
Writes the string to the output if possible the encoding part is optimized .
32,007
/**
 * Runs ifconfig for the given NIC, parses its output, and returns an
 * EventRecord populated with the interface's address, status, and traffic
 * counters.
 *
 * @param device interface name to query (e.g. "eth0")
 * @return EventRecord holding the parsed NIC attributes
 * @throws UnknownHostException if the local host name cannot be resolved
 */
public EventRecord query ( String device ) throws UnknownHostException {
  StringBuffer sb = Environment . runCommand ( "/sbin/ifconfig " + device ) ;
  EventRecord retval = new EventRecord ( InetAddress . getLocalHost ( ) . getCanonicalHostName ( ) , InetAddress . getAllByName ( InetAddress . getLocalHost ( ) . getHostName ( ) ) , Calendar . getInstance ( ) , "NIC" , "Unknown" , device , "-" ) ;
  retval . set ( "hwAddress" , findPattern ( "HWaddr\\s*([\\S{2}:]{17})" , sb . toString ( ) , 1 ) ) ;
  // NOTE(review): "ipAddress" is set here and then conditionally overwritten
  // below with the same pattern's result — the first set looks redundant.
  retval . set ( "ipAddress" , findPattern ( "inet\\s+addr:\\s*([\\w.?]*)" , sb . toString ( ) , 1 ) ) ;
  String tmp = findPattern ( "inet\\s+addr:\\s*([\\w.?]*)" , sb . toString ( ) , 1 ) ;
  // An interface with no inet addr line is treated as DOWN.
  retval . set ( "status" , ( tmp == null ) ? "DOWN" : "UP" ) ;
  if ( tmp != null ) retval . set ( "ipAddress" , tmp ) ;
  retval . set ( "rxPackets" , findPattern ( "RX\\s*packets\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "rxErrors" , findPattern ( "RX.+errors\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "rxDropped" , findPattern ( "RX.+dropped\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "rxOverruns" , findPattern ( "RX.+overruns\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "rxFrame" , findPattern ( "RX.+frame\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "txPackets" , findPattern ( "TX\\s*packets\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "txErrors" , findPattern ( "TX.+errors\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "txDropped" , findPattern ( "TX.+dropped\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "txOverruns" , findPattern ( "TX.+overruns\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "txCarrier" , findPattern ( "TX.+carrier\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "collisions" , findPattern ( "\\s+collisions\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "rxBytes" , findPattern ( "RX\\s*bytes\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  retval . set ( "txBytes" , findPattern ( "TX\\s*bytes\\s*:\\s*(\\d+)" , sb . toString ( ) , 1 ) ) ;
  return retval ;
}
Reads and parses the output of ifconfig for a specified NIC and creates an appropriate EventRecord that holds the desirable information for it .
32,008
/**
 * Emit the next key/value set defined by the child RecordReaders and the
 * join operation of this composite RR. First drains any values still
 * buffered in the join collector; when it is exhausted, refills it from the
 * underlying readers until either a joinable set is produced or all readers
 * are empty.
 *
 * @param key   receives the join key (cloned from the collector)
 * @param value receives the joined tuple
 * @return true if a pair was emitted, false when all input is consumed
 * @throws IOException on underlying reader failure
 */
public boolean next ( K key , TupleWritable value ) throws IOException {
  // Emit from the collector's current buffered set, if any.
  if ( jc . flush ( value ) ) {
    WritableUtils . cloneInto ( key , jc . key ( ) ) ;
    return true ;
  }
  jc . clear ( ) ;
  K iterkey = createKey ( ) ;
  final PriorityQueue < ComposableRecordReader < K , ? > > q = getRecordReaderQueue ( ) ;
  // Keep pulling keys from the child readers until something joins.
  while ( ! q . isEmpty ( ) ) {
    fillJoinCollector ( iterkey ) ;
    jc . reset ( iterkey ) ;
    if ( jc . flush ( value ) ) {
      WritableUtils . cloneInto ( key , jc . key ( ) ) ;
      return true ;
    }
    jc . clear ( ) ;
  }
  return false ;
}
Emit the next set of key value pairs as defined by the child RecordReaders and operation associated with this composite RR .
32,009
/**
 * Persists a job's status, profile, counters, and task-completion events to
 * a DFS info file. On write failure the partial file is best-effort deleted.
 * No-op unless the store is active and configured to retain job info.
 *
 * Fix: the output stream was leaked when any write threw; it is now always
 * closed in a finally block.
 *
 * @param job the job whose state is persisted
 */
public void store ( JobInProgress job ) {
  if ( active && retainTime > 0 ) {
    JobID jobId = job . getStatus ( ) . getJobID ( ) ;
    Path jobStatusFile = getInfoFilePath ( jobId ) ;
    FSDataOutputStream dataOut = null ;
    try {
      dataOut = fs . create ( jobStatusFile ) ;
      job . getStatus ( ) . write ( dataOut ) ;
      job . getProfile ( ) . write ( dataOut ) ;
      job . getCounters ( ) . write ( dataOut ) ;
      TaskCompletionEvent [ ] events = job . getTaskCompletionEvents ( 0 , Integer . MAX_VALUE ) ;
      dataOut . writeInt ( events . length ) ;
      for ( TaskCompletionEvent event : events ) {
        event . write ( dataOut ) ;
      }
      dataOut . close ( ) ;
      dataOut = null ; // closed cleanly; nothing for finally to do
    } catch ( IOException ex ) {
      LOG . warn ( "Could not store [" + jobId + "] job info : " + ex . getMessage ( ) , ex ) ;
      // Remove the possibly-partial file so readers never see torn data.
      try {
        fs . delete ( jobStatusFile , true ) ;
      } catch ( IOException ex1 ) {
        // Best-effort cleanup; nothing more we can do.
      }
    } finally {
      if ( dataOut != null ) {
        try {
          dataOut . close ( ) ;
        } catch ( IOException ignored ) {
          // Already handling a failure; suppress secondary close error.
        }
      }
    }
  }
}
Persists a job in DFS .
32,010
/**
 * Retrieves the JobStatus persisted by {@code store} from DFS.
 *
 * Fix: the input stream was leaked when deserialization threw; it is now
 * always closed in a finally block.
 *
 * @param jobId the job to look up; null yields a warning and null result
 * @return the stored JobStatus, or null if unavailable or not active
 */
public JobStatus readJobStatus ( JobID jobId ) {
  JobStatus jobStatus = null ;
  if ( null == jobId ) {
    LOG . warn ( "Could not read job status for null jobId" ) ;
    return null ;
  }
  if ( active ) {
    FSDataInputStream dataIn = null ;
    try {
      dataIn = getJobInfoFile ( jobId ) ;
      if ( dataIn != null ) {
        jobStatus = readJobStatus ( dataIn ) ;
      }
    } catch ( IOException ex ) {
      LOG . warn ( "Could not read [" + jobId + "] job status : " + ex , ex ) ;
    } finally {
      if ( dataIn != null ) {
        try {
          dataIn . close ( ) ;
        } catch ( IOException ignored ) {
          // Best-effort close.
        }
      }
    }
  }
  return jobStatus ;
}
This method retrieves JobStatus information from DFS stored using store method .
32,011
/**
 * Retrieves the JobProfile persisted by {@code store} from DFS. The status
 * record precedes the profile in the file, so it is read and discarded first.
 *
 * Fix: the input stream was leaked when deserialization threw; it is now
 * always closed in a finally block.
 *
 * @param jobId the job to look up
 * @return the stored JobProfile, or null if unavailable or not active
 */
public JobProfile readJobProfile ( JobID jobId ) {
  JobProfile jobProfile = null ;
  if ( active ) {
    FSDataInputStream dataIn = null ;
    try {
      dataIn = getJobInfoFile ( jobId ) ;
      if ( dataIn != null ) {
        readJobStatus ( dataIn ) ; // skip the leading status record
        jobProfile = readJobProfile ( dataIn ) ;
      }
    } catch ( IOException ex ) {
      LOG . warn ( "Could not read [" + jobId + "] job profile : " + ex , ex ) ;
    } finally {
      if ( dataIn != null ) {
        try {
          dataIn . close ( ) ;
        } catch ( IOException ignored ) {
          // Best-effort close.
        }
      }
    }
  }
  return jobProfile ;
}
This method retrieves JobProfile information from DFS stored using store method .
32,012
/**
 * Retrieves the Counters persisted by {@code store} from DFS. The status and
 * profile records precede the counters, so they are read and discarded first.
 *
 * Fix: the input stream was leaked when deserialization threw; it is now
 * always closed in a finally block.
 *
 * @param jobId the job to look up
 * @return the stored Counters, or null if unavailable or not active
 */
public Counters readCounters ( JobID jobId ) {
  Counters counters = null ;
  if ( active ) {
    FSDataInputStream dataIn = null ;
    try {
      dataIn = getJobInfoFile ( jobId ) ;
      if ( dataIn != null ) {
        readJobStatus ( dataIn ) ;  // skip status record
        readJobProfile ( dataIn ) ; // skip profile record
        counters = readCounters ( dataIn ) ;
      }
    } catch ( IOException ex ) {
      LOG . warn ( "Could not read [" + jobId + "] job counters : " + ex , ex ) ;
    } finally {
      if ( dataIn != null ) {
        try {
          dataIn . close ( ) ;
        } catch ( IOException ignored ) {
          // Best-effort close.
        }
      }
    }
  }
  return counters ;
}
This method retrieves Counters information from DFS stored using store method .
32,013
/**
 * Retrieves the TaskCompletionEvents persisted by {@code store} from DFS.
 * The status, profile, and counters records precede the events, so they are
 * read and discarded first.
 *
 * Fix: the input stream was leaked when deserialization threw; it is now
 * always closed in a finally block.
 *
 * @param jobId       the job to look up
 * @param fromEventId index of the first event to return
 * @param maxEvents   maximum number of events to return
 * @return the stored events, or an empty array if unavailable or not active
 */
public TaskCompletionEvent [ ] readJobTaskCompletionEvents ( JobID jobId , int fromEventId , int maxEvents ) {
  TaskCompletionEvent [ ] events = TaskCompletionEvent . EMPTY_ARRAY ;
  if ( active ) {
    FSDataInputStream dataIn = null ;
    try {
      dataIn = getJobInfoFile ( jobId ) ;
      if ( dataIn != null ) {
        readJobStatus ( dataIn ) ;  // skip status record
        readJobProfile ( dataIn ) ; // skip profile record
        readCounters ( dataIn ) ;   // skip counters record
        events = readEvents ( dataIn , fromEventId , maxEvents ) ;
      }
    } catch ( IOException ex ) {
      LOG . warn ( "Could not read [" + jobId + "] job events : " + ex , ex ) ;
    } finally {
      if ( dataIn != null ) {
        try {
          dataIn . close ( ) ;
        } catch ( IOException ignored ) {
          // Best-effort close.
        }
      }
    }
  }
  return events ;
}
This method retrieves TaskCompletionEvents information from DFS stored using store method .
32,014
/**
 * Find the first occurrence of the byte sequence b within utf[start, end).
 *
 * @param utf   haystack bytes (a UTF-8 encoded string)
 * @param start first index to consider (inclusive)
 * @param end   index one past the last byte to consider
 * @param b     needle bytes to search for
 * @return index of the first match, or -1 if b does not occur in the range
 */
public static int findBytes ( byte [ ] utf , int start , int end , byte [ ] b ) {
  // Last position where a full-length match could still fit.
  final int lastCandidate = end - b.length;
  for (int pos = start; pos <= lastCandidate; pos++) {
    int k = 0;
    while (k < b.length && utf[pos + k] == b[k]) {
      k++;
    }
    if (k == b.length) {
      return pos; // every needle byte matched
    }
  }
  return -1;
}
Find the first occurrence of the given bytes b in a UTF - 8 encoded string
32,015
/**
 * Returns a human-readable summary of this job's progress, e.g.
 * "Maps : 3/10 (0.3) Reduces : 0/2 (0.0)".
 *
 * Fix: replaced StringBuffer with StringBuilder and removed the string
 * concatenation inside append() calls, which created throwaway intermediate
 * strings and defeated the purpose of using a builder.
 *
 * @return a string representation of this job status
 */
public String getStatus ( ) {
  StringBuilder s = new StringBuilder ( ) ;
  s . append ( "Maps : " ) . append ( completedMaps ) . append ( "/" ) . append ( totalMaps ) ;
  s . append ( " (" ) . append ( mapProgress ) . append ( ")" ) ;
  s . append ( " Reduces : " ) . append ( completedReduces ) . append ( "/" ) . append ( totalReduces ) ;
  s . append ( " (" ) . append ( reduceProgress ) . append ( ")" ) ;
  return s . toString ( ) ;
}
Returns a string representation of this job status
32,016
/**
 * Update this job's cached view from the given JobStatus: refreshes the
 * counters, completion flags, and map/reduce progress from the running job,
 * then derives completed task counts from progress * total.
 *
 * @param status the latest status reported for the job
 */
void update ( JobStatus status ) {
  this . status = status ;
  try {
    this . counters = running . getCounters ( ) ;
    this . completed = running . isComplete ( ) ;
    this . successful = running . isSuccessful ( ) ;
    this . mapProgress = running . mapProgress ( ) ;
    this . reduceProgress = running . reduceProgress ( ) ;
  } catch ( IOException ioe ) {
    // On RPC failure keep the previously cached values; just log the error.
    ioe . printStackTrace ( ) ;
  }
  // Completed counts are estimates derived from fractional progress.
  this . completedMaps = ( int ) ( this . totalMaps * this . mapProgress ) ;
  this . completedReduces = ( int ) ( this . totalReduces * this . reduceProgress ) ;
}
Update this job status according to the given JobStatus
32,017
/**
 * Sets the immediateFlush property on every FileAppender reachable from the
 * current log4j loggers, visiting each appender at most once.
 *
 * @param immediateFlush new value for each file appender's immediateFlush
 */
private synchronized void setFlush ( boolean immediateFlush ) {
  try {
    // Appenders can be shared between loggers; track the ones already set.
    Set < FileAppender > flushedFileAppenders = new HashSet < FileAppender > ( ) ;
    Enumeration < ? > currentLoggers = LogManager . getLoggerRepository ( ) . getCurrentLoggers ( ) ;
    while ( currentLoggers . hasMoreElements ( ) ) {
      Object nextLogger = currentLoggers . nextElement ( ) ;
      if ( nextLogger instanceof Logger ) {
        Logger currentLogger = ( Logger ) nextLogger ;
        // NOTE(review): appenders are taken from each logger's PARENT, not
        // the logger itself — presumably to reach inherited appenders;
        // confirm this is intentional (a logger's own appenders are skipped).
        Enumeration < ? > allAppenders = currentLogger . getParent ( ) . getAllAppenders ( ) ;
        while ( allAppenders . hasMoreElements ( ) ) {
          Object nextElement = allAppenders . nextElement ( ) ;
          if ( nextElement instanceof FileAppender ) {
            FileAppender fileAppender = ( FileAppender ) nextElement ;
            if ( ! flushedFileAppenders . contains ( fileAppender ) ) {
              flushedFileAppenders . add ( fileAppender ) ;
              fileAppender . setImmediateFlush ( immediateFlush ) ;
            }
          }
        }
      }
    }
  } catch ( Throwable e ) {
    // Logging reconfiguration must never take down the caller.
    LOG . error ( "Failed flushing logs" , e ) ;
  }
}
Set immediateFlush property for all file appenders .
32,018
/**
 * Reads the persisted parsing state for all open log files from the given
 * property file into persData, remembering the file name for later writes.
 * A missing state file is treated as "no previous state" and ignored.
 *
 * Fix: the FileInputStream was never closed; it is now closed in a finally
 * block.
 *
 * @param fname path of the property file holding the persisted state
 */
public static void readState ( String fname ) {
  filename = fname ;
  FileInputStream in = null ;
  try {
    in = new FileInputStream ( filename ) ;
    persData . load ( in ) ;
  } catch ( FileNotFoundException e1 ) {
    // No state file yet: start with empty state (original behavior).
  } catch ( IOException e ) {
    e . printStackTrace ( ) ;
  } finally {
    if ( in != null ) {
      try {
        in . close ( ) ;
      } catch ( IOException ignored ) {
        // Best-effort close.
      }
    }
  }
}
Read the state of parsing for all open log files from a property file .
32,019
/**
 * Read and return the persisted parsing state for one log file. Unknown
 * files yield the default state (no first line, offset 0); malformed
 * entries are reported and also fall back to the default.
 *
 * @param fname log file whose state is requested
 * @return a ParseState for fname, never null
 */
public static ParseState getState ( String fname ) {
  // Default entry encodes "null<SEP>0" so unknown files parse uniformly.
  String stored = persData.getProperty(fname, "null" + SEPARATOR + "0");
  String[] fields = stored.split(SEPARATOR, 2);
  String firstLine = null;
  long offset = 0;
  if (fields.length < 2) {
    System.err.println("Malformed persistent state data found");
    Environment.logInfo("Malformed persistent state data found");
  } else {
    if (!fields[0].equals("null")) {
      firstLine = fields[0];
    }
    offset = Long.parseLong(fields[1]);
  }
  return new ParseState(fname, firstLine, offset);
}
Read and return the state of parsing for a particular log file .
32,020
/**
 * Persists the parsing state for one log file into persData.
 *
 * Fix: a null state was logged but then dereferenced anyway, throwing a
 * NullPointerException; the method now returns after reporting it.
 *
 * @param state state to store; null is reported and ignored
 */
public static void setState ( ParseState state ) {
  if ( state == null ) {
    System . err . println ( "Null state found" ) ;
    Environment . logInfo ( "Null state found" ) ;
    return ; // previously fell through and hit a NullPointerException below
  }
  persData . setProperty ( state . filename , state . firstLine + SEPARATOR + state . offset ) ;
}
Set the state of parsing for a particular log file .
32,021
/**
 * Update the persisted parsing state for one log file: always records the
 * new offset, and replaces the remembered first line only when a non-null
 * one is supplied.
 *
 * @param filename  log file whose state is updated
 * @param firstLine new first line, or null to keep the existing one
 * @param offset    new read offset
 */
public static void updateState ( String filename , String firstLine , long offset ) {
  ParseState current = getState(filename);
  if (firstLine != null) {
    current.firstLine = firstLine;
  }
  current.offset = offset;
  setState(current);
}
Update the state of parsing for a particular log file.
32,022
/**
 * Writes the parsing state for all open log files to the given property
 * file, stamping the current date as the file's comment header.
 *
 * Fix: Properties.store does not close the stream it is given, so the
 * FileOutputStream was leaked; it is now closed in a finally block.
 *
 * @param fname destination property file path
 */
public static void writeState ( String fname ) {
  FileOutputStream out = null ;
  try {
    out = new FileOutputStream ( fname ) ;
    persData . store ( out , Calendar . getInstance ( ) . getTime ( ) . toString ( ) ) ;
  } catch ( FileNotFoundException e1 ) {
    e1 . printStackTrace ( ) ;
  } catch ( IOException e ) {
    e . printStackTrace ( ) ;
  } finally {
    if ( out != null ) {
      try {
        out . close ( ) ;
      } catch ( IOException ignored ) {
        // Best-effort close.
      }
    }
  }
}
Write the state of parsing for all open log files to a property file on disk .
32,023
/**
 * Returns the SQL used to count the total number of input rows. A
 * user-supplied count query from the configuration takes precedence;
 * otherwise a SELECT COUNT(*) over the input table (with the optional
 * WHERE conditions) is built. Subclasses can override for custom behaviour.
 *
 * Fix: removed string concatenation inside StringBuilder.append, which
 * allocated throwaway intermediate strings.
 *
 * @return the count query string
 */
protected String getCountQuery ( ) {
  if ( dbConf . getInputCountQuery ( ) != null ) {
    return dbConf . getInputCountQuery ( ) ;
  }
  StringBuilder query = new StringBuilder ( ) ;
  query . append ( "SELECT COUNT(*) FROM " ) . append ( tableName ) ;
  if ( conditions != null && conditions . length ( ) > 0 ) {
    query . append ( " WHERE " ) . append ( conditions ) ;
  }
  return query . toString ( ) ;
}
Returns the query for getting the total number of rows subclasses can override this for custom behaviour .
32,024
/**
 * Recursively deletes the remote directory tree rooted at this node's path.
 * Failures are reported to the user via a warning dialog rather than
 * propagated.
 */
public void delete ( ) {
  try {
    // Second argument true => recursive delete.
    getDFS ( ) . delete ( this . path , true ) ;
  } catch ( IOException e ) {
    e . printStackTrace ( ) ;
    // Surface the failure in the UI; this is an interactive browser action.
    MessageDialog . openWarning ( null , "Delete file" , "Unable to delete file \"" + this . path + "\"\n" + e ) ;
  }
}
Does a recursive delete of the remote directory tree at this node .
32,025
/**
 * Returns a (lazily created and cached) connection to the DFS for this
 * location, failing with a user-visible error if the location's file system
 * is not a DistributedFileSystem.
 *
 * @return the DistributedFileSystem for this location
 * @throws IOException if the underlying FS is not a DistributedFileSystem
 */
DistributedFileSystem getDFS ( ) throws IOException {
  if (this.dfs != null) {
    return this.dfs; // already resolved and cached
  }
  FileSystem candidate = location.getDFS();
  if (!(candidate instanceof DistributedFileSystem)) {
    ErrorMessageDialog.display("DFS Browser",
        "The DFS Browser cannot browse anything else but a Distributed File System!");
    throw new IOException("DFS Browser expects a DistributedFileSystem!");
  }
  this.dfs = (DistributedFileSystem) candidate;
  return this.dfs;
}
Gets a connection to the DFS
32,026
/**
 * Determine how many tasks of a given type we want to run on a TaskTracker.
 * The cap scales with the cluster-wide demand (runnable tasks / total
 * slots, padded by maxDiff) so that under-capacity load spreads evenly
 * across nodes instead of clumping on early heartbeaters.
 *
 * @param totalRunnableTasks tasks of this type outstanding cluster-wide
 * @param localMaxTasks      slot count of this type on the tracker
 * @param totalSlots         slots of this type cluster-wide
 * @return number of tasks this tracker should run, at most localMaxTasks
 */
int getCap ( int totalRunnableTasks , int localMaxTasks , int totalSlots ) {
  // Fraction of cluster capacity in demand, padded by the allowed imbalance.
  final double load = maxDiff + ((double) totalRunnableTasks) / totalSlots;
  // Scale the local slots by the load, rounding up, but never exceed them.
  final int cap = (int) Math.min(localMaxTasks, Math.ceil(load * localMaxTasks));
  if (LOG.isDebugEnabled()) {
    LOG.debug("load:" + load + " maxDiff:" + maxDiff + " totalRunnable:" +
        totalRunnableTasks + " totalSlots:" + totalSlots +
        " localMaxTasks:" + localMaxTasks + " cap:" + cap);
  }
  return cap;
}
Determine how many tasks of a given type we want to run on a TaskTracker . This cap is chosen based on how many tasks of that type are outstanding in total so that when the cluster is used below capacity tasks are spread out uniformly across the nodes rather than being clumped up on whichever machines sent out heartbeats earliest .
32,027
/**
 * On a read error, attempts to reload the current edit log segment: waits
 * briefly, verifies the reader is still making acceptable progress, then
 * re-opens the stream for the current segment and seeks back to the last
 * known position.
 *
 * @throws IOException if progress checking or stream setup fails
 */
private void tryReloadingEditLog ( ) throws IOException {
  LOG . info ( "Segment - trying to reload edit log segment" ) ;
  // Back off before retrying so transient errors can clear.
  sleep ( errorSleepTimeout ) ;
  checkProgress ( ) ;
  setupIngestStreamWithRetries ( currentSegmentTxId ) ;
  refreshStreamPosition ( ) ;
}
On error when reading transactions from the stream try reloading the input stream .
32,028
/**
 * Updates reader state after each operation read from the stream. Tracks
 * the most recent transaction id and time, and on an END_LOG_SEGMENT op
 * drops the current stream so the next read opens the following segment.
 *
 * @param op         the operation just read
 * @param checkTxnId whether to validate the op's txid against the last one
 * @throws IOException if the transaction id check fails
 */
private void updateState ( FSEditLogOp op , boolean checkTxnId ) throws IOException {
  InjectionHandler . processEvent ( InjectionEvent . SERVERLOGREADER_UPDATE , op ) ;
  if ( checkTxnId ) {
    mostRecentlyReadTransactionTxId = ServerLogReaderUtil . checkTransactionId ( mostRecentlyReadTransactionTxId , op ) ;
  }
  updateStreamPosition ( ) ;
  core . getMetrics ( ) . readOperations . inc ( ) ;
  mostRecentlyReadTransactionTime = now ( ) ;
  if ( op . opCode == FSEditLogOpCodes . OP_END_LOG_SEGMENT ) {
    LOG . info ( "Segment - ending log segment start txid: " + currentSegmentTxId + ", end txid: " + op . getTransactionId ( ) ) ;
    // Next segment begins right after this closing transaction; nulling the
    // stream forces the next read to open it.
    currentSegmentTxId = op . getTransactionId ( ) + 1 ;
    currentEditLogInputStream = null ;
    currentEditLogInputStreamPosition = - 1 ;
  } else if ( op . opCode == FSEditLogOpCodes . OP_START_LOG_SEGMENT ) {
    LOG . info ( "Segment - starting log segment start txid: " + currentSegmentTxId ) ;
  }
}
For each operation read from the stream check if this is a closing transaction . If so we are sure we need to move to the next segment .
32,029
/**
 * Re-synchronizes the current input stream with the last acknowledged
 * position: when a position has been recorded, the stream is refreshed to
 * it; otherwise the stream's current position is recorded as the baseline.
 *
 * @throws IOException if refreshing or querying the stream fails
 */
private void refreshStreamPosition ( ) throws IOException {
  if (currentEditLogInputStreamPosition == -1) {
    // No baseline yet: remember where the stream currently stands.
    currentEditLogInputStreamPosition = currentEditLogInputStream.getPosition();
  } else {
    // Seek the stream back to the recorded position.
    currentEditLogInputStream.refresh(currentEditLogInputStreamPosition,
        mostRecentlyReadTransactionTxId);
  }
}
Called to refresh the position in the current input stream to the last ACK'd position.
32,030
/**
 * Ensures the reader has a usable input stream. After END_LOG_SEGMENT the
 * stream is nulled, so the next read re-opens it for the new segment here;
 * the stream is then refreshed to the recorded position if one exists.
 *
 * @throws IOException if stream setup or refresh fails
 */
private void refreshInputStream ( ) throws IOException {
  if ( currentEditLogInputStream == null ) {
    LOG . info ( "Segment - setup input stream for txid: " + currentSegmentTxId ) ;
    setupIngestStreamWithRetries ( currentSegmentTxId ) ;
    // First open of this segment: record the starting position.
    if ( currentEditLogInputStreamPosition == - 1 ) {
      currentEditLogInputStreamPosition = currentEditLogInputStream . getPosition ( ) ;
    }
  }
  if ( currentEditLogInputStreamPosition != - 1 ) {
    currentEditLogInputStream . refresh ( currentEditLogInputStreamPosition , mostRecentlyReadTransactionTxId ) ;
  }
}
On normal activity when we reach END_LOG_SEGMENT we will null the stream and at next read we want to instantiate a stream for next segment .
32,031
/**
 * Initializes the first input stream. Tries the full sequence (detect the
 * journal manager, find the oldest segment txid, open the stream) up to
 * three times, because there is a race between finding the txid and the
 * stream still existing when we open it.
 *
 * @throws IOException if all three attempts fail
 */
protected void initialize ( ) throws IOException {
  for ( int i = 0 ; i < 3 ; i ++ ) {
    try {
      LOG . info ( "Detecting current primary node - attempt " + i ) ;
      detectJournalManager ( ) ;
      LOG . info ( "Finding oldest segment txid - attempt " + i ) ;
      currentSegmentTxId = findOldestLogSegmentTxid ( ) ;
      LOG . info ( "Setting up input stream for txid: " + currentSegmentTxId + " - attempt " + i ) ;
      setupIngestStreamWithRetries ( currentSegmentTxId ) ;
      return ;
    } catch ( IOException e ) {
      LOG . warn ( "Initialization exception" , e ) ;
      // Only rethrow after the final attempt.
      if ( i == 2 ) {
        LOG . error ( "Initialization failed." ) ;
        throw e ;
      }
    }
  }
}
Initialize first stream . Just for startup we are extra careful to try 3 times as there is potential race between finding the txid and then setting up the stream .
32,032
/**
 * Opens the edit input stream for the given txid, retrying on failure up to
 * inputStreamRetries times with a one-second pause between attempts.
 *
 * @param txid first transaction id of the segment to open
 * @throws IOException after the final attempt fails (with the last cause)
 */
private void setupIngestStreamWithRetries ( long txid ) throws IOException {
  for ( int i = 0 ; i < inputStreamRetries ; i ++ ) {
    try {
      setupCurrentEditStream ( txid ) ;
      return ;
    } catch ( IOException e ) {
      // Exhausted retries: surface the last failure with context.
      if ( i == inputStreamRetries - 1 ) {
        throw new IOException ( "Cannot obtain stream for txid: " + txid , e ) ;
      }
      LOG . info ( "Error :" , e ) ;
    }
    sleep ( 1000 ) ;
    LOG . info ( "Retrying to get edit input stream for txid: " + txid + ", tried: " + ( i + 1 ) + " times" ) ;
  }
}
Setup the input stream to be consumed by the reader with retries on failures .
32,033
/**
 * Opens the input stream for the single segment starting at txid and
 * records the segment's txid and the time as the last-read timestamp.
 *
 * @param txid first transaction id of the segment
 * @throws IOException if the stream cannot be obtained
 */
private void setupCurrentEditStream ( long txid ) throws IOException {
  currentEditLogInputStream = JournalSet . getInputStream ( remoteJournalManager , txid ) ;
  currentSegmentTxId = txid ;
  // Treat the successful open as read activity for progress tracking.
  mostRecentlyReadTransactionTime = now ( ) ;
}
Setup the input stream to be consumed by the reader . The input stream corresponds to a single segment .
32,034
/**
 * Check whether a log segment starting at the given txid exists in the
 * underlying storage directory, by scanning the current manifest.
 *
 * @param txid candidate segment start transaction id
 * @return true iff some manifest segment starts exactly at txid
 * @throws IOException if the manifest cannot be obtained
 */
boolean segmentExists ( long txid ) throws IOException {
  for (RemoteEditLog segment : getManifest()) {
    if (txid == segment.getStartTxId()) {
      return true;
    }
  }
  return false;
}
Check if a segment with a given txid exists in the underlying storage directory. When the reader cannot read any new data it will periodically check if there was some unclean shutdown which results in an unfinalized log.
32,035
/**
 * Get all available log segments present in the underlying storage
 * directory. Never returns null or an empty list — those conditions are
 * turned into an IOException instead.
 *
 * @return the non-empty list of log segments
 * @throws IOException when no segment list can be obtained
 */
List < RemoteEditLog > getManifest ( ) throws IOException {
  // -1 asks the journal manager for the full manifest.
  RemoteEditLogManifest manifest = remoteJournalManager.getEditLogManifest(-1);
  if (manifest == null || manifest.getLogs().isEmpty()) {
    throw new IOException("Cannot obtain the list of log segments");
  }
  return manifest.getLogs();
}
Get all available log segments present in the underlying storage directory . This function will never return null or empty list of segments - it will throw exception in this case .
32,036
/**
 * Sleep for the given number of milliseconds. If interrupted, restores the
 * thread's interrupt flag and converts the interruption into an
 * IOException for the caller.
 *
 * @param ms duration to sleep, in milliseconds
 * @throws IOException when the sleep is interrupted
 */
protected void sleep ( long ms ) throws IOException {
  try {
    Thread.sleep(ms);
  } catch (InterruptedException ie) {
    LOG.error("Interrupted when sleeping", ie);
    // Preserve the interrupt status for code further up the stack.
    Thread.currentThread().interrupt();
    throw new IOException("Received interruption");
  }
}
Sleep for n milliseconds . Throw IOException when interrupted .
32,037
/**
 * Get the compression type configured for sequence-file outputs, defaulting
 * to RECORD when the "io.seqfile.compression.type" key is unset.
 *
 * @param job the configuration to read
 * @return the configured CompressionType, or CompressionType.RECORD
 */
static public CompressionType getCompressionType ( Configuration job ) {
  String name = job.get("io.seqfile.compression.type");
  if (name == null) {
    return CompressionType.RECORD; // default when not configured
  }
  return CompressionType.valueOf(name);
}
Get the compression type for the reduce outputs
32,038
/**
 * Set the compression type for sequence files in the given configuration,
 * under the "io.seqfile.compression.type" key.
 *
 * @param job the configuration to modify
 * @param val the compression type to record
 */
static public void setCompressionType ( Configuration job , CompressionType val ) {
  job . set ( "io.seqfile.compression.type" , val . toString ( ) ) ;
}
Set the compression type for sequence files .
32,039
/**
 * Adds a name to the cache or tracks its use count. If the name is already
 * cached, its canonical (interned) instance is returned. During the
 * initialization phase, uses are counted in a transient map and a name is
 * promoted into the cache once it reaches useThreshold.
 *
 * @param name the name being looked up / recorded
 * @return the cached canonical instance if present (or counted via the
 *         transient map), otherwise null
 */
K put ( final K name ) {
  K internal = cache . get ( name ) ;
  if ( internal != null ) {
    // Cache hit: count it and hand back the canonical instance.
    lookups ++ ;
    return internal ;
  }
  if ( ! initialized ) {
    // Still warming up: track frequency in the transient map.
    UseCount useCount = transientMap . get ( name ) ;
    if ( useCount != null ) {
      useCount . increment ( ) ;
      if ( useCount . get ( ) >= useThreshold ) {
        // Frequently used: move it into the permanent cache.
        promote ( name ) ;
      }
      return useCount . value ;
    }
    useCount = new UseCount ( name ) ;
    transientMap . put ( name , useCount ) ;
  }
  // Not cached, and either first sighting or tracking is disabled.
  return null ;
}
Add a given name to the cache or track its use count. If the name already exists then the internal value is returned.
32,040
/**
 * Marks the name cache as initialized: use counts are no longer tracked,
 * and the transient counting map is discarded to free heap.
 */
void initialized ( ) {
  LOG . info ( "initialized with " + size ( ) + " entries " + lookups + " lookups" ) ;
  this . initialized = true ;
  // Release the warm-up bookkeeping; only the real cache remains.
  transientMap . clear ( ) ;
  transientMap = null ;
}
Mark the name cache as initialized . The use count is no longer tracked and the transient map used for initializing the cache is discarded to save heap space .
32,041
/**
 * Promotes a frequently used name from the transient counting map into the
 * permanent cache, crediting its accumulated uses to the lookup counter.
 *
 * @param name the name to promote
 */
private void promote ( final K name ) {
  transientMap . remove ( name ) ;
  // The name is its own canonical cached value.
  cache . put ( name , name ) ;
  // Its pre-promotion uses now count as cache lookups.
  lookups += useThreshold ;
}
Promote a frequently used name to the cache
32,042
/**
 * Initializes the backing heap array. Subclass constructors must call this.
 *
 * @param maxSize maximum number of elements the heap can hold; the backing
 *                array is maxSize + 1 because slot 0 is unused (1-based heap)
 */
@ SuppressWarnings ( "unchecked" ) protected final void initialize ( int maxSize ) {
  size = 0 ;
  // +1: heap entries live at indices 1..maxSize.
  int heapSize = maxSize + 1 ;
  heap = ( T [ ] ) new Object [ heapSize ] ;
  this . maxSize = maxSize ;
}
Subclass constructors must call this .
32,043
/**
 * Check that the manifest's logs form contiguous, non-overlapping,
 * sorted transaction ranges. Skipped entirely when the manifest was not
 * declared contiguous.
 *
 * @throws IllegalStateException if any segment starts at or before the
 *         previous segment's end
 */
private void checkState ( ) {
  Preconditions.checkNotNull(logs);
  if (!contiguous) {
    return;
  }
  // Each segment must begin strictly after the previous one ends.
  RemoteEditLog previous = null;
  for (RemoteEditLog current : logs) {
    if (previous != null && current.getStartTxId() <= previous.getEndTxId()) {
      throw new IllegalStateException("Invalid log manifest:" + this);
    }
    previous = current;
  }
}
Check that the logs are contiguous and non - overlapping sequences of transactions in sorted order
32,044
/**
 * Computes the sort-phase processing rate as accumulated progress divided
 * by time spent sorting. Returns 0 while still in shuffle; in the sort
 * phase the progress beyond the shuffle third is used; once in reduce the
 * full sort third over (sortFinish - shuffleFinish) is used.
 *
 * @param currentTime wall-clock time of the measurement, in ms
 * @return progress units per millisecond for the sort phase
 */
public double getSortProcessingRate ( long currentTime ) {
  long timeSpentSorting = 0 ;
  float progress = 0 ;
  Phase phase = getPhase ( ) ;
  long sortFinishTime = getSortFinishTime ( ) ;
  long shuffleFinishTime = getShuffleFinishTime ( ) ;
  if ( phase == Phase . SHUFFLE ) {
    // No sorting has happened yet.
    return 0 ;
  } else if ( getPhase ( ) == Phase . SORT ) {
    // NOTE(review): this logs an error when shuffleFinishTime < currentTime,
    // which is the normal ordering — the comparison looks inverted; confirm
    // against the original intent before changing.
    if ( shuffleFinishTime < currentTime ) {
      LOG . error ( "Shuffle finish time is " + shuffleFinishTime + " which is < current time " + currentTime + " in " + this . getTaskID ( ) ) ;
    }
    timeSpentSorting = currentTime - shuffleFinishTime ;
    // Each of shuffle/sort/reduce accounts for a third of total progress.
    progress = getProgress ( ) - ( float ) 1.0 / 3 ;
    if ( progress < 0 ) {
      LOG . error ( "Shuffle progress calculated to be " + progress + " in task status for " + this . getTaskID ( ) + ". Settings to 0" ) ;
      progress = 0 ;
    }
  } else if ( getPhase ( ) == Phase . REDUCE ) {
    // Sort completed: its full third of progress took (sort - shuffle) ms.
    progress = ( float ) 1.0 / 3 ;
    if ( shuffleFinishTime <= sortFinishTime ) {
      // NOTE(review): "fime" typo and condition direction look suspect —
      // shuffle normally finishes before sort, which this treats as an error.
      LOG . error ( "Shuffle finish fime is " + shuffleFinishTime + " which is <= sort finish time " + sortFinishTime + " in " + this . getTaskID ( ) ) ;
      return 0 ;
    }
    timeSpentSorting = sortFinishTime - shuffleFinishTime ;
  }
  // NOTE(review): timeSpentSorting can be 0 here, yielding Infinity/NaN.
  sortProcessingRate = progress / timeSpentSorting ;
  return sortProcessingRate ;
}
For the sort phase, use the accumulated progress rate as the processing rate.
32,045
/**
 * Computes the reduce-phase processing rate as reduce input bytes divided
 * by the time elapsed since the sort finished. Returns 0 outside the
 * reduce phase or when the timestamps are inconsistent.
 *
 * @param currentTime wall-clock time of the measurement, in ms
 * @return bytes processed per millisecond during reduce
 */
public double getReduceProcessingRate ( long currentTime ) {
  Phase phase = getPhase ( ) ;
  if ( phase != Phase . REDUCE ) {
    return 0 ;
  }
  @ SuppressWarnings ( "deprecation" ) long bytesProcessed = super . getCounters ( ) . findCounter ( Task . Counter . REDUCE_INPUT_BYTES ) . getCounter ( ) ;
  long timeSpentInReduce = 0 ;
  long sortFinishTime = getSortFinishTime ( ) ;
  // Guard also ensures the divisor below is strictly positive.
  if ( sortFinishTime >= currentTime ) {
    LOG . error ( "Sort finish time is " + sortFinishTime + " which is >= current time " + currentTime + " in " + this . getTaskID ( ) ) ;
    return 0 ;
  }
  timeSpentInReduce = currentTime - sortFinishTime ;
  reduceProcessingRate = bytesProcessed / timeSpentInReduce ;
  return reduceProcessingRate ;
}
For the reduce phase, use the bytes processed per unit of time since the sort finished as the processing rate.
32,046
/**
 * Reads the sessions map back from a JSON stream: expects a "sessions"
 * object whose field names are session ids and whose values deserialize
 * into Session objects.
 *
 * @param coronaSerializer the JSON reader positioned at the sessions field
 * @throws IOException on malformed input or read failure
 */
private void readSessions ( CoronaSerializer coronaSerializer ) throws IOException {
  coronaSerializer . readField ( "sessions" ) ;
  coronaSerializer . readStartObjectToken ( "sessions" ) ;
  JsonToken current = coronaSerializer . nextToken ( ) ;
  // One field per session until the enclosing object closes.
  while ( current != JsonToken . END_OBJECT ) {
    String sessionId = coronaSerializer . getFieldName ( ) ;
    Session session = new Session ( clusterManager . conf . getCMHeartbeatDelayMax ( ) , coronaSerializer ) ;
    sessions . put ( sessionId , session ) ;
    current = coronaSerializer . nextToken ( ) ;
  }
}
Reads back the sessions map from a JSON stream
32,047
/**
 * Rebuilds SessionManager-related state that was not directly persisted:
 * re-registers each session's resource requests with the node manager,
 * restores per-session state, re-adds sessions to the scheduler, and
 * refreshes the running-sessions metric. Only runs while the cluster
 * manager is in safe mode.
 */
public void restoreAfterSafeModeRestart ( ) {
  if ( ! clusterManager . safeMode ) {
    return ;
  }
  for ( Session session : sessions . values ( ) ) {
    // Node manager must learn about outstanding requests before scheduling.
    for ( ResourceRequestInfo resourceRequestInfo : session . idToRequest . values ( ) ) {
      clusterManager . nodeManager . restoreResourceRequestInfo ( resourceRequestInfo ) ;
    }
    session . restoreAfterSafeModeRestart ( ) ;
    clusterManager . getScheduler ( ) . addSession ( session . getSessionId ( ) , session ) ;
  }
  clusterManager . getMetrics ( ) . setNumRunningSessions ( sessions . size ( ) ) ;
}
This method rebuilds members related to the SessionManager instance which were not directly persisted themselves .
32,048
/**
 * Serializes the SessionManager state as JSON: a "sessions" object mapping
 * session id to serialized session, followed by the session counter. Used
 * when persisting ClusterManager state to disk.
 *
 * Fix: iterate the map's entrySet instead of keySet()+get(), avoiding a
 * second lookup per session.
 *
 * @param jsonGenerator destination JSON writer
 * @throws IOException if writing fails
 */
public void write ( JsonGenerator jsonGenerator ) throws IOException {
  jsonGenerator . writeStartObject ( ) ;
  jsonGenerator . writeFieldName ( "sessions" ) ;
  jsonGenerator . writeStartObject ( ) ;
  for ( Map . Entry < String , Session > entry : sessions . entrySet ( ) ) {
    jsonGenerator . writeFieldName ( entry . getKey ( ) ) ;
    entry . getValue ( ) . write ( jsonGenerator ) ;
  }
  jsonGenerator . writeEndObject ( ) ;
  jsonGenerator . writeNumberField ( "sessionCounter" , sessionCounter . longValue ( ) ) ;
  jsonGenerator . writeEndObject ( ) ;
}
Used to write the state of the SessionManager instance to disk when we are persisting the state of the ClusterManager
32,049
/**
 * Computes, per pool, the average wait time for the first resource of the
 * given type across all live (non-deleted) sessions. Sessions with no wait
 * recorded for the type are skipped.
 *
 * @param type resource type whose first-wait times are averaged
 * @return map from pool info to average first-resource wait in ms
 */
public Map < PoolInfo , Long > getTypePoolInfoAveFirstWaitMs ( ResourceType type ) {
  Map < PoolInfo , WaitCount > poolInfoWaitCount = new HashMap < PoolInfo , WaitCount > ( ) ;
  for ( Session session : sessions . values ( ) ) {
    // Lock each session while reading its wait/pool state.
    synchronized ( session ) {
      if ( ! session . isDeleted ( ) ) {
        Long wait = session . getTypeFirstWaitMs ( type ) ;
        if ( wait == null ) {
          continue ;
        }
        WaitCount waitCount = poolInfoWaitCount . get ( session . getPoolInfo ( ) ) ;
        if ( waitCount == null ) {
          poolInfoWaitCount . put ( session . getPoolInfo ( ) , new WaitCount ( wait ) ) ;
        } else {
          waitCount . addWaitMsecs ( wait ) ;
        }
      }
    }
  }
  // Second pass reduces each accumulator to its average.
  Map < PoolInfo , Long > poolInfoWaitMs = new HashMap < PoolInfo , Long > ( poolInfoWaitCount . size ( ) ) ;
  for ( Map . Entry < PoolInfo , WaitCount > entry : poolInfoWaitCount . entrySet ( ) ) {
    poolInfoWaitMs . put ( entry . getKey ( ) , entry . getValue ( ) . getAverageWait ( ) ) ;
  }
  return poolInfoWaitMs ;
}
Get a map of pool infos to average wait times for first resource of a resource type .
32,050
/**
 * Builds a DFSClient for the request, propagating the requester's UGI into
 * a copy of the master configuration so the connection runs as that user.
 *
 * @param request the HTTP request carrying the user identity
 * @return a DFSClient configured for the requesting user
 * @throws IOException          if client creation fails
 * @throws InterruptedException if resolving the UGI is interrupted
 */
protected DFSClient getDFSClient ( HttpServletRequest request ) throws IOException , InterruptedException {
  // Copy so the per-request UGI never leaks into the shared master config.
  Configuration conf = new Configuration ( masterConf ) ;
  UnixUserGroupInformation . saveToConf ( conf , UnixUserGroupInformation . UGI_PROPERTY_NAME , getUGI ( request ) ) ;
  return JspHelper . getDFSClient ( request , conf ) ;
}
getting a client for connecting to dfs
32,051
/**
 * Parses the "candidates" request parameter — a space-separated list of
 * name:port pairs — into an array of DatanodeID candidates.
 *
 * @param request the HTTP request carrying the "candidates" parameter
 * @return the parsed datanode IDs, or null if the parameter is absent or empty
 * @throws IOException if an entry is not a valid name:port pair
 */
private DatanodeID [ ] getDatanodes ( HttpServletRequest request ) throws IOException {
  final String datanodes = request . getParameter ( "candidates" ) ;
  if ( datanodes == null ) {
    return null ;
  }
  final String [ ] datanodeStrs = datanodes . split ( " " ) ;
  if ( datanodeStrs . length == 0 ) {
    return null ;
  }
  final DatanodeID [ ] dnIDs = new DatanodeID [ datanodeStrs . length ] ;
  for ( int i = 0 ; i < dnIDs . length ; i ++ ) {
    final int colon = datanodeStrs [ i ] . indexOf ( ":" ) ;
    if ( colon < 0 ) {
      throw new IOException ( "Invalid datanode name " + datanodeStrs [ i ]
          + ", expecting name:port pair" ) ;
    }
    // Fixed: the original initialized hostName to the whole name:port string
    // and then unconditionally overwrote it; assign it once after validation.
    final String hostName = datanodeStrs [ i ] . substring ( 0 , colon ) ;
    final int infoPort ;
    try {
      infoPort = Integer . parseInt ( datanodeStrs [ i ] . substring ( colon + 1 ) ) ;
    } catch ( NumberFormatException ne ) {
      throw new IOException ( "Invalid datanode name " + datanodeStrs [ i ]
          + ", expecting name:port pair" , ne ) ;
    }
    // -1 ipc port: only the info (HTTP) port is known from the request.
    dnIDs [ i ] = new DatanodeID ( hostName , null , infoPort , - 1 ) ;
  }
  return dnIDs ;
}
Get the datanode candidates from the request
32,052
/**
 * Loads an edit log and applies its changes to the in-memory namespace.
 * Records the last applied transaction id and logs size/count/timing stats.
 *
 * @param edits the edit log stream to replay
 * @param lastAppliedTxId the txid applied before this stream starts
 * @return the number of edits applied
 * @throws IOException if replay fails
 */
int loadFSEdits ( EditLogInputStream edits , long lastAppliedTxId ) throws IOException { long startTime = now ( ) ; this . lastAppliedTxId = lastAppliedTxId ; int numEdits = loadFSEdits ( edits , true ) ; FSImage . LOG . info ( "Edits file " + edits . toString ( ) + " of size: " + edits . length ( ) + ", # of edits: " + numEdits + " loaded in: " + ( now ( ) - startTime ) / 1000 + " seconds." ) ; return numEdits ; }
Load an edit log and apply the changes to the in - memory structure . This is where we apply edits that we've been writing to disk all along .
32,053
/**
 * Handles an error found while loading a transaction: if the namesystem is
 * configured to fail on txid mismatch, log and throw; otherwise prompt the
 * user (recovery mode) so the problematic transaction can be skipped.
 *
 * @param errorMsg description of the inconsistency
 * @throws IOException when configured to fail hard on mismatch
 */
private void checkFail ( String errorMsg ) throws IOException { if ( fsNamesys . failOnTxIdMismatch ( ) ) { FSEditLog . LOG . error ( errorMsg ) ; throw new IOException ( errorMsg ) ; } MetaRecoveryContext . editLogLoaderPrompt ( errorMsg ) ; }
When encountering an error while loading the transaction we can skip the problematic transaction and continue first prompting the user . This will only be possible when NN is started with appropriate option .
32,054
/**
 * Scans an edit log stream and returns how much of it is valid: the position
 * after the last readable op and the first/last txids seen. Read failures
 * trigger a resync and the scan continues; a failure mid-header yields zero
 * valid transactions. The stream is read through but not closed.
 *
 * @param in the edit log stream to validate
 * @return validation result (end position, first txid, last txid)
 */
public static EditLogValidation validateEditLog ( EditLogInputStream in ) { long lastPos = 0 ; long firstTxId = HdfsConstants . INVALID_TXID ; long lastTxId = HdfsConstants . INVALID_TXID ; long numValid = 0 ; try { FSEditLogOp op = null ; while ( true ) { lastPos = in . getPosition ( ) ; try { if ( ( op = in . readOp ( ) ) == null ) { break ; } } catch ( Throwable t ) { FSImage . LOG . warn ( "Caught exception after reading " + numValid + " ops from " + in + " while determining its valid length." + "Position was " + lastPos , t ) ; in . resync ( ) ; FSImage . LOG . info ( "After resync, position is " + in . getPosition ( ) ) ; continue ; } if ( firstTxId == HdfsConstants . INVALID_TXID ) { firstTxId = op . getTransactionId ( ) ; } if ( lastTxId == HdfsConstants . INVALID_TXID || op . txid > lastTxId ) { lastTxId = op . getTransactionId ( ) ; } else { FSImage . LOG . error ( "Out of order txid found. Found " + op . txid + ", expected " + ( lastTxId + 1 ) ) ; } numValid ++ ; } } catch ( Throwable t ) { FSImage . LOG . debug ( "Caught exception after reading " + numValid + " ops from " + in + " while determining its valid length." , t ) ; } return new EditLogValidation ( lastPos , firstTxId , lastTxId , false ) ; }
Return the number of valid transactions in the stream . If the stream is truncated during the header returns a value indicating that there are 0 valid transactions . This reads through the stream but does not close it .
32,055
/**
 * Initializes the job's tasks: logs submission to job history, fetches the
 * cached raw splits (falling back to reading the job.split file from the
 * filesystem), creates one map task per split, and logs job initialization.
 *
 * @throws IOException if the split file cannot be read
 */
public void initTasks ( ) throws IOException { jobHistory . logSubmitted ( jobFile . toString ( ) , this . startTime , this . jobTrackerId ) ; JobClient . RawSplit [ ] splits = null ; splits = JobClient . getAndRemoveCachedSplits ( jobId ) ; if ( splits == null ) { FileSystem fs = jobFile . getFileSystem ( jobConf ) ; Path splitFile = new Path ( jobFile . getParent ( ) , "job.split" ) ; LOG . info ( "Reading splits from " + splitFile ) ; DataInputStream splitFileIn = fs . open ( splitFile ) ; try { splits = JobClient . readSplitFile ( splitFileIn ) ; } finally { splitFileIn . close ( ) ; } } initTasksFromSplits ( splits ) ; jobHistory . logInited ( this . launchTime , numMapTasks , numReduceTasks ) ; }
Read input splits and create a map per split .
32,056
/**
 * Removes from the given list the TIP whose id matches the intended TIP,
 * without checking any other conditions. Caller must hold the scheduler lock.
 *
 * @param taskList the list to search and mutate
 * @param intendedTip the TIP whose id we are looking for
 * @return the removed TaskInProgress, or null if no match was found
 */
private TaskInProgress removeMatchingTipUnprotectedUnconditional ( List < TaskInProgress > taskList , TaskInProgress intendedTip ) {
  Iterator < TaskInProgress > it = taskList . iterator ( ) ;
  while ( it . hasNext ( ) ) {
    TaskInProgress candidate = it . next ( ) ;
    if ( candidate . getTIPId ( ) . equals ( intendedTip . getTIPId ( ) ) ) {
      // Remove via the iterator so the list is not mutated during iteration.
      it . remove ( ) ;
      return candidate ;
    }
  }
  return null ;
}
Removes matching TIP without checking any conditions
32,057
/**
 * Registers a new reduce task attempt for the given TIP on the given tracker.
 * First tries to hand out a task-cleanup attempt; otherwise removes the TIP
 * from the non-running maps list, schedules it as a reduce, obtains an attempt
 * to run, records it as running, and applies the job-cleanup state if needed.
 *
 * @return the Task to run, or null if no attempt could be created
 */
public Task forceNewReduceTaskForTip ( String taskTrackerName , String hostName , TaskInProgress forcedTip ) { synchronized ( lockObject ) { Task result = obtainTaskCleanupTask ( taskTrackerName , forcedTip ) ; if ( result != null ) { return result ; } removeMatchingTipUnprotectedUnconditional ( nonRunningMaps , forcedTip ) ; LOG . info ( "Running task " + forcedTip . getTIPId ( ) + " on " + taskTrackerName + "(" + hostName + ")" ) ; scheduleReduceUnprotected ( forcedTip ) ; result = forcedTip . getTaskToRun ( taskTrackerName ) ; if ( result != null ) { addRunningTaskToTIPUnprotected ( forcedTip , result . getTaskID ( ) , taskTrackerName , hostName , true ) ; setJobCleanupTaskState ( result ) ; } return result ; } }
Registers a new task attempt for the given task
32,058
/**
 * Decides whether a tracker may run the given TIP: allowed unless the TIP has
 * already failed on that tracker's host. The taskTracker name itself is not
 * consulted — only the host matters.
 *
 * @return true if the TIP has not failed on trackerHost
 */
public boolean canTrackerBeUsed ( String taskTracker , String trackerHost , TaskInProgress tip ) { synchronized ( lockObject ) { return ! tip . hasFailedOnMachine ( trackerHost ) ; } }
Can a tracker be used for a TIP?
32,059
/**
 * If the given task is the job-cleanup task, records the job's final state on
 * it (FAILED, KILLED, or SUCCEEDED) so the cleanup runs with the right outcome.
 * No-op for any other task.
 */
private void setJobCleanupTaskState ( Task task ) {
  if ( ! task . isJobCleanupTask ( ) ) {
    return ;
  }
  final org . apache . hadoop . mapreduce . JobStatus . State state ;
  if ( jobFailed ) {
    state = org . apache . hadoop . mapreduce . JobStatus . State . FAILED ;
  } else if ( jobKilled ) {
    state = org . apache . hadoop . mapreduce . JobStatus . State . KILLED ;
  } else {
    state = org . apache . hadoop . mapreduce . JobStatus . State . SUCCEEDED ;
  }
  task . setJobCleanupTaskState ( state ) ;
}
Sets task state according to job state if given task is cleanup one
32,060
/**
 * Reads DFS_JOURNALNODE_HOSTS from the configuration and returns the journal
 * node HTTP addresses, appending the default HTTP port to any entry that does
 * not already carry an explicit port.
 *
 * @param conf configuration holding the journal node host list
 * @return host:port strings, one per configured journal node
 */
public static String [ ] getJournalHttpHosts ( Configuration conf ) {
  Collection < String > hosts = conf . getStringCollection ( JournalConfigKeys . DFS_JOURNALNODE_HOSTS ) ;
  String [ ] httpAddresses = new String [ hosts . size ( ) ] ;
  int idx = 0 ;
  for ( String host : hosts ) {
    // Entries without a ':' get the default HTTP port appended.
    httpAddresses [ idx ++ ] = host . contains ( ":" )
        ? host
        : host + ":" + JournalConfigKeys . DFS_JOURNALNODE_HTTP_PORT_DEFAULT ;
  }
  return httpAddresses ;
}
Parse the DFS_JOURNALNODE_HOSTS to get the list of Journal Node Hosts
32,061
/**
 * Returns the socket address the JournalNode web server should bind to, read
 * from DFS_JOURNALNODE_HTTP_ADDRESS_KEY with the default address and default
 * HTTP port as fallbacks.
 */
public static InetSocketAddress getAddress ( Configuration conf ) { String addr = conf . get ( JournalConfigKeys . DFS_JOURNALNODE_HTTP_ADDRESS_KEY , JournalConfigKeys . DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT ) ; return NetUtils . createSocketAddr ( addr , JournalConfigKeys . DFS_JOURNALNODE_HTTP_PORT_DEFAULT ) ; }
Get the address bound by the JournalNode to start the web server .
32,062
/**
 * Attaches a child node to this node, setting this node as its parent.
 *
 * @param child the node to attach; must not already have a parent
 * @return true if the child was added to the children collection
 * @throws IllegalArgumentException if the child already belongs to a node
 */
public synchronized boolean addChild ( Node child ) {
  if ( child . parent != null ) {
    throw new IllegalArgumentException ( "The child is already under another node:" + child . parent ) ;
  }
  checkChildren ( ) ;
  final boolean added = children . add ( child ) ;
  if ( added ) {
    // Only claim parenthood once the collection actually accepted the child.
    child . parent = this ;
  }
  return added ;
}
Add a child node to this node .
32,063
/**
 * Recomputes the weighted target map/reduce shares of the two clusters. Each
 * cluster's target share is its weight-scaled runnable-task count divided by
 * the combined weight-scaled runnable count, so the two shares sum to 1 for
 * each task type. Returns early without touching shares when neither cluster
 * has any runnable work.
 */
static private void updateShares ( Cluster clusters [ ] ) {
  assert ( clusters . length == 2 ) ;
  // BUG FIX: the original early-exit tested clusters[0].runnableMaps and
  // clusters[1].runnableReduces twice each, never looking at
  // clusters[1].runnableMaps or clusters[0].runnableReduces.
  if ( clusters [ 0 ] . runnableMaps == 0 && clusters [ 1 ] . runnableMaps == 0
      && clusters [ 0 ] . runnableReduces == 0 && clusters [ 1 ] . runnableReduces == 0 ) {
    return ;
  }
  if ( ! ( clusters [ 0 ] . runnableMaps == 0 && clusters [ 1 ] . runnableMaps == 0 ) ) {
    clusters [ 0 ] . targetMapShare = clusters [ 0 ] . runnableMaps * clusters [ 0 ] . weight
        / ( clusters [ 0 ] . runnableMaps * clusters [ 0 ] . weight
            + clusters [ 1 ] . runnableMaps * clusters [ 1 ] . weight ) ;
    clusters [ 1 ] . targetMapShare = 1 - clusters [ 0 ] . targetMapShare ;
  }
  if ( ! ( clusters [ 0 ] . runnableReduces == 0 && clusters [ 1 ] . runnableReduces == 0 ) ) {
    clusters [ 0 ] . targetReduceShare = clusters [ 0 ] . runnableReduces * clusters [ 0 ] . weight
        / ( clusters [ 0 ] . runnableReduces * clusters [ 0 ] . weight
            + clusters [ 1 ] . runnableReduces * clusters [ 1 ] . weight ) ;
    clusters [ 1 ] . targetReduceShare = 1 - clusters [ 0 ] . targetReduceShare ;
  }
  for ( int i = 0 ; i < 2 ; ++ i ) {
    // BUG FIX: argument order now matches the format labels; the original
    // passed weight where runnableMaps was printed (and shifted the rest).
    LOG . info ( String . format ( "Update Shares. "
        + "cluster%s:%s runnableMaps:%s runnableReduces:%s "
        + "weight:%s targetMapShare:%s targetReduceShare:%s" ,
        i , clusters [ i ] . address , clusters [ i ] . runnableMaps ,
        clusters [ i ] . runnableReduces , clusters [ i ] . weight ,
        clusters [ i ] . targetMapShare , clusters [ i ] . targetReduceShare ) ) ;
  }
}
Update the task share of the clusters
32,064
/**
 * Balancer loop: wakes roughly ten times per update interval and, once a full
 * interval has elapsed, moves slots between the two clusters according to
 * their runnable tasks. Exceptions are logged and the loop continues until
 * {@code running} is cleared.
 */
public void run ( ) { long lastUpdate = - 1L ; while ( running ) { try { Thread . sleep ( updateInterval / 10 ) ; long now = JobTracker . getClock ( ) . getTime ( ) ; if ( now - lastUpdate > updateInterval ) { lastUpdate = now ; doMoveSlots ( clusters ) ; } } catch ( Exception e ) { LOG . error ( "Exception while balancing cluster." , e ) ; } } }
Keep moving slots between two clusters according to their runnable tasks . These clusters are assumed to run tasktrackers on the same set of machines
32,065
/**
 * Computes the total slots of the given type for a tasktracker: looks up the
 * configured per-CPU-count default (falling back to the global default when
 * the CPU count is unmapped) and caps it by the tracker's own reported max.
 *
 * @param status the tracker's status report
 * @param type MAP or REDUCE
 * @return min(configured slots for the tracker's CPU count, tracker's max)
 */
private int getTotalSlots ( TaskTrackerStatus status , TaskType type ) { Map < Integer , Integer > defaultCpuToMaxSlots = ( type == TaskType . MAP ) ? defaultCpuToMaxMapSlots : defaultCpuToMaxReduceSlots ; int cpus = status . getResourceStatus ( ) . getNumProcessors ( ) ; Integer slots = defaultCpuToMaxSlots . get ( cpus ) ; if ( slots == null ) { slots = ( type == TaskType . MAP ) ? defaultMaxMapSlots : defaultMaxReduceSlots ; } int taskTrackerSlots = ( type == TaskType . MAP ) ? status . getMaxMapSlots ( ) : status . getMaxReduceSlots ( ) ; return Math . min ( slots , taskTrackerSlots ) ; }
Obtain a tasktracker's total slots of the given type , shared across the two clusters
32,066
/**
 * Creates a FairScheduler RPC client proxy bound to the given target address,
 * using the current user's UGI and the configured socket factory.
 *
 * @param target host:port of the FairScheduler RPC endpoint
 * @throws IOException if the proxy cannot be created
 */
private static FairSchedulerProtocol createClient ( String target , Configuration conf ) throws IOException { InetSocketAddress addr = NetUtils . createSocketAddr ( target ) ; UserGroupInformation ugi = UserGroupInformation . getCurrentUGI ( ) ; LOG . info ( "Connecting to " + addr ) ; return ( FairSchedulerProtocol ) RPC . getProxy ( FairSchedulerProtocol . class , FairSchedulerProtocol . versionID , addr , ugi , conf , NetUtils . getSocketFactory ( conf , FairSchedulerProtocol . class ) ) ; }
Create a FairScheduler RPC client
32,067
/**
 * Entry point: logs the startup/shutdown banner, constructs an HourGlass with
 * a fresh Configuration, and runs it. Any throwable is logged as fatal and the
 * process exits with status -1.
 */
public static void main ( String argv [ ] ) { StringUtils . startupShutdownMessage ( HourGlass . class , argv , LOG ) ; try { HourGlass hourGlass = new HourGlass ( new Configuration ( ) ) ; hourGlass . run ( ) ; } catch ( Throwable e ) { LOG . fatal ( StringUtils . stringifyException ( e ) ) ; System . exit ( - 1 ) ; } }
Start the HourGlass process
32,068
/**
 * Reloads the config file if a reload interval has elapsed, the file has been
 * modified since the last successful reload, and the modification is at least
 * RELOAD_WAIT old (so a file mid-write is not picked up). On failure the error
 * is logged once and the existing configuration stays in effect.
 *
 * @return true if the file was actually reloaded
 */
public synchronized boolean reloadConfigsIfNecessary ( ) { long time = RaidNode . now ( ) ; if ( time > lastReloadAttempt + reloadInterval ) { lastReloadAttempt = time ; try { File file = new File ( configFileName ) ; long lastModified = file . lastModified ( ) ; if ( lastModified > lastSuccessfulReload && time > lastModified + RELOAD_WAIT ) { reloadConfigs ( ) ; lastSuccessfulReload = time ; lastReloadAttemptFailed = false ; return true ; } } catch ( Exception e ) { if ( ! lastReloadAttemptFailed ) { LOG . error ( "Failed to reload config file - " + "will use existing configuration." , e ) ; } lastReloadAttemptFailed = true ; } } return false ; }
Reload config file if it hasn't been loaded in a while . Returns true if the file was reloaded .
32,069
/**
 * Stops the background configuration-reload thread: clears the running flag,
 * interrupts the thread, and waits for it to terminate. No-op when the thread
 * is not running.
 *
 * @throws InterruptedException if interrupted while joining the thread
 */
void stopReload ( ) throws InterruptedException {
  if ( reloadThread == null ) {
    return ;
  }
  running = false ;
  reloadThread . interrupt ( ) ;
  reloadThread . join ( ) ;
  reloadThread = null ;
}
Stop the background thread that reloads the config file
32,070
/**
 * Looks up the PolicyInfo with the given name among all configured policies.
 *
 * @param policyName name to search for
 * @return the matching policy, or null if none is configured with that name
 */
PolicyInfo getPolicy ( String policyName ) {
  PolicyInfo found = null ;
  for ( PolicyInfo candidate : allPolicies ) {
    if ( policyName . equals ( candidate . getName ( ) ) ) {
      found = candidate ;
      break ;
    }
  }
  return found ;
}
Find the PolicyInfo corresponding to a given policy name
32,071
/**
 * Stops all RaidNode services: idempotent (guarded by stopRequested). Clears
 * the running flags, stops the RPC server, interrupts each worker thread
 * (trigger, under-redundant-files, block fixer/copier, corrupt-file counter,
 * purge, stats collector), stops the placement monitor and the HTTP info
 * server, and finally unregisters the MBean. Monitor references are nulled so
 * the interrupted threads observe shutdown.
 */
public void stop ( ) { if ( stopRequested ) { return ; } stopRequested = true ; running = false ; if ( server != null ) server . stop ( ) ; if ( triggerThread != null ) { triggerThread . interrupt ( ) ; triggerMonitor = null ; } if ( urfThread != null ) { urfThread . interrupt ( ) ; urfProcessor = null ; } if ( blockIntegrityMonitor != null ) blockIntegrityMonitor . running = false ; if ( blockFixerThread != null ) blockFixerThread . interrupt ( ) ; if ( blockCopierThread != null ) blockCopierThread . interrupt ( ) ; if ( corruptFileCounterThread != null ) corruptFileCounterThread . interrupt ( ) ; if ( purgeMonitor != null ) purgeMonitor . running = false ; if ( purgeThread != null ) purgeThread . interrupt ( ) ; if ( placementMonitor != null ) placementMonitor . stop ( ) ; if ( statsCollector != null ) statsCollector . stop ( ) ; if ( statsCollectorThread != null ) statsCollectorThread . interrupt ( ) ; if ( infoServer != null ) { try { infoServer . stop ( ) ; } catch ( Exception e ) { LOG . warn ( "Exception shutting down " + RaidNode . class , e ) ; } } this . unregisterMBean ( ) ; }
Stop all RaidNode threads and wait for all to finish .
32,072
/**
 * Returns, per monitored directory, counters of the different kinds of corrupt
 * files, delegating to the block-integrity monitor's CorruptionWorker.
 */
public Map < String , Map < CorruptFileStatus , Long > > getCorruptFilesCounterMap ( ) { return ( ( CorruptionWorker ) blockIntegrityMonitor . getCorruptionMonitor ( ) ) . getCorruptFilesCounterMap ( ) ; }
Get a map whose values are counters of the different types of corrupt files
32,073
/**
 * Finds a policy that has raiding enabled and uses the given codec; used to
 * re-generate parity files for modified source files.
 *
 * @param codec codec whose id must match the policy's codec id
 * @return the first matching PolicyInfo, or null when no policy applies
 */
public PolicyInfo determinePolicy ( Codec codec ) {
  for ( PolicyInfo info : configMgr . getAllPolicies ( ) ) {
    // A policy qualifies only if raiding is enabled AND the codec matches.
    if ( info . getShouldRaid ( ) && info . getCodecId ( ) . equals ( codec . id ) ) {
      return info ;
    }
  }
  return null ;
}
Determine a PolicyInfo from the codec to re - generate the parity files of modified source files .
32,074
/**
 * Raids every encoding candidate produced by splitting the given file for the
 * codec. (Only used by tests.)
 *
 * @return true if at least one candidate was successfully raided
 * @throws IOException if raiding a candidate fails
 */
public static boolean doRaid ( Configuration conf , FileStatus stat , Path destPath ,
    Codec codec , Statistics statistics , Progressable reporter ,
    boolean doSimulate , int targetRepl , int metaRepl ) throws IOException {
  boolean succeed = false ;
  for ( EncodingCandidate ec : RaidNode . splitPaths ( conf , codec , stat ) ) {
    // BUG FIX: the original "succeed = succeed || doRaid(...)" short-circuited
    // after the first success, silently skipping all remaining candidates.
    // Evaluate the call first so every candidate is processed.
    boolean raided = doRaid ( conf , ec , destPath , codec , statistics , reporter ,
        doSimulate , targetRepl , metaRepl ) ;
    succeed = succeed || raided ;
  }
  return succeed ;
}
Raid all encoding candidates of a file ; only used by tests
32,075
/**
 * Checks whether the file/directory is already raided by a codec of higher
 * priority than the given one (directory-incompatible codecs are skipped for
 * directories). Fires RAID_ENCODING_SKIP_PATH for injection testing when a
 * higher-priority parity is found.
 *
 * @return true if a higher-priority codec already has parity for this path
 */
public static boolean raidedByOtherHighPriCodec ( Configuration conf , FileStatus stat , Codec codec ) throws IOException { for ( Codec tcodec : Codec . getCodecs ( ) ) { if ( tcodec . priority > codec . priority ) { if ( stat . isDir ( ) && ! tcodec . isDirRaid ) { continue ; } if ( ParityFilePair . parityExists ( stat , tcodec , conf ) ) { InjectionHandler . processEvent ( InjectionEvent . RAID_ENCODING_SKIP_PATH ) ; return true ; } } } return false ; }
check if the file is already raided by high priority codec
32,076
/**
 * RAIDs an individual directory: lists its raidable files, skips directories
 * with two or fewer blocks, accounts processed blocks/size, generates the
 * parity file, and (unless simulating) lowers each source file's replication
 * to targetRepl. Updates the remaining-size and parity metadata statistics.
 *
 * @return SUCCESS, FAILURE (replication could not be reduced), or NOACTION
 * @throws IOException on filesystem errors or interrupted parity generation
 */
private static LOGRESULTS doDirRaid ( Configuration conf , EncodingCandidate ec , Path destPath , Codec codec , Statistics statistics , Progressable reporter , boolean doSimulate , int targetRepl , int metaRepl ) throws IOException { FileStatus stat = ec . srcStat ; Path p = stat . getPath ( ) ; FileSystem srcFs = p . getFileSystem ( conf ) ; List < FileStatus > lfs = RaidNode . listDirectoryRaidFileStatus ( conf , srcFs , p ) ; if ( lfs == null ) { return LOGRESULTS . NOACTION ; } long blockNum = DirectoryStripeReader . getBlockNum ( lfs ) ; if ( blockNum <= 2 ) { return LOGRESULTS . NOACTION ; } long diskSpace = 0 ; int srcRepl = 0 ; for ( FileStatus fsStat : lfs ) { diskSpace += ( fsStat . getLen ( ) * fsStat . getReplication ( ) ) ; if ( fsStat . getReplication ( ) > srcRepl ) { srcRepl = fsStat . getReplication ( ) ; } } long parityBlockSize = DirectoryStripeReader . getParityBlockSize ( conf , lfs ) ; statistics . numProcessedBlocks += blockNum ; statistics . processedSize += diskSpace ; boolean parityGenerated = false ; try { parityGenerated = generateParityFile ( conf , ec , targetRepl , reporter , srcFs , destPath , codec , blockNum , srcRepl , metaRepl , parityBlockSize , lfs ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } if ( ! parityGenerated ) return LOGRESULTS . NOACTION ; if ( ! doSimulate ) { for ( FileStatus fsStat : lfs ) { if ( srcFs . setReplication ( fsStat . getPath ( ) , ( short ) targetRepl ) == false ) { LOG . info ( "Error in reducing replication of " + fsStat . getPath ( ) + " to " + targetRepl ) ; statistics . remainingSize += diskSpace ; return LOGRESULTS . FAILURE ; } } ; } diskSpace = 0 ; for ( FileStatus fsStat : lfs ) { diskSpace += ( fsStat . getLen ( ) * targetRepl ) ; } statistics . remainingSize += diskSpace ; long numMeta = blockNum / codec . stripeLength ; if ( blockNum % codec . stripeLength != 0 ) { numMeta ++ ; } statistics . numMetaBlocks += ( numMeta * metaRepl ) ; statistics . 
metaSize += ( numMeta * metaRepl * parityBlockSize ) ; return LOGRESULTS . SUCCESS ; }
RAID an individual directory
32,077
/**
 * RAIDs an individual file: skips files with two or fewer blocks, accounts
 * processed blocks/size, generates the parity file, and (unless simulating)
 * lowers the file's replication to targetRepl. Updates remaining-size and
 * parity metadata statistics.
 *
 * @return SUCCESS, FAILURE (replication could not be reduced), or NOACTION
 * @throws IOException on filesystem errors
 * @throws InterruptedException if parity generation is interrupted
 */
private static LOGRESULTS doFileRaid ( Configuration conf , EncodingCandidate ec , Path destPath , Codec codec , Statistics statistics , Progressable reporter , boolean doSimulate , int targetRepl , int metaRepl ) throws IOException , InterruptedException { FileStatus stat = ec . srcStat ; Path p = stat . getPath ( ) ; FileSystem srcFs = p . getFileSystem ( conf ) ; BlockLocation [ ] locations = srcFs . getFileBlockLocations ( stat , 0 , stat . getLen ( ) ) ; if ( locations . length <= 2 ) { return LOGRESULTS . NOACTION ; } long diskSpace = 0 ; for ( BlockLocation l : locations ) { diskSpace += ( l . getLength ( ) * stat . getReplication ( ) ) ; } statistics . numProcessedBlocks += locations . length ; statistics . processedSize += diskSpace ; boolean parityGenerated = generateParityFile ( conf , ec , targetRepl , reporter , srcFs , destPath , codec , locations . length , stat . getReplication ( ) , metaRepl , stat . getBlockSize ( ) , null ) ; if ( ! parityGenerated ) { return LOGRESULTS . NOACTION ; } if ( ! doSimulate ) { if ( srcFs . setReplication ( p , ( short ) targetRepl ) == false ) { LOG . info ( "Error in reducing replication of " + p + " to " + targetRepl ) ; statistics . remainingSize += diskSpace ; return LOGRESULTS . FAILURE ; } ; } diskSpace = 0 ; for ( BlockLocation l : locations ) { diskSpace += ( l . getLength ( ) * targetRepl ) ; } statistics . remainingSize += diskSpace ; int numMeta = locations . length / codec . stripeLength ; if ( locations . length % codec . stripeLength != 0 ) { numMeta ++ ; } statistics . numMetaBlocks += ( numMeta * metaRepl ) ; statistics . metaSize += ( numMeta * metaRepl * stat . getBlockSize ( ) ) ; return LOGRESULTS . SUCCESS ; }
RAID an individual file
32,078
/**
 * Instantiates the RaidNode implementation named by RAIDNODE_CLASSNAME_KEY
 * (default DistRaidNode) via its (Configuration) constructor. All reflection
 * failures are rewrapped as ClassNotFoundException for a uniform error path.
 *
 * @throws ClassNotFoundException if the class is not a RaidNode or cannot be constructed
 */
public static RaidNode createRaidNode ( Configuration conf ) throws ClassNotFoundException { try { Class < ? > raidNodeClass = conf . getClass ( RAIDNODE_CLASSNAME_KEY , DistRaidNode . class ) ; if ( ! RaidNode . class . isAssignableFrom ( raidNodeClass ) ) { throw new ClassNotFoundException ( "not an implementation of RaidNode" ) ; } Constructor < ? > constructor = raidNodeClass . getConstructor ( new Class [ ] { Configuration . class } ) ; return ( RaidNode ) constructor . newInstance ( conf ) ; } catch ( NoSuchMethodException e ) { throw new ClassNotFoundException ( "cannot construct raidnode" , e ) ; } catch ( InstantiationException e ) { throw new ClassNotFoundException ( "cannot construct raidnode" , e ) ; } catch ( IllegalAccessException e ) { throw new ClassNotFoundException ( "cannot construct raidnode" , e ) ; } catch ( InvocationTargetException e ) { throw new ClassNotFoundException ( "cannot construct raidnode" , e ) ; } }
Create an instance of the appropriate subclass of RaidNode
32,079
/**
 * Command-line factory: parses startup options from argv (printing usage and
 * returning null on bad arguments), records the option in the configuration,
 * and constructs the configured RaidNode implementation.
 */
public static RaidNode createRaidNode ( String argv [ ] , Configuration conf ) throws IOException , ClassNotFoundException { if ( conf == null ) { conf = new Configuration ( ) ; } StartupOption startOpt = parseArguments ( argv ) ; if ( startOpt == null ) { printUsage ( ) ; return null ; } setStartupOption ( conf , startOpt ) ; RaidNode node = createRaidNode ( conf ) ; return node ; }
Create an instance of the RaidNode
32,080
/**
 * Returns the map-reduce job id from the configuration. When absent, generates
 * a "localRaid" id from the current timestamp and stores it back into the
 * configuration so subsequent calls return the same id.
 */
public static String getJobID ( Configuration conf ) {
  final String existing = conf . get ( "mapred.job.id" , null ) ;
  if ( existing != null ) {
    return existing ;
  }
  String generated = "localRaid" + df . format ( new Date ( ) ) ;
  conf . set ( "mapred.job.id" , generated ) ;
  return generated ;
}
Get the job id from the configuration
32,081
/**
 * Adjusts the configuration for nameservice-specific keys: extracts the
 * service id from argv, validates it, promotes the avatar-specific keys for
 * that service to generic keys, and sets up the default URI.
 *
 * @return argv with the service-name argument filtered out
 */
public static String [ ] adjustConf ( String [ ] argv , Configuration conf ) { String [ ] serviceId = new String [ ] { "" } ; String [ ] filteredArgv = DFSUtil . getServiceName ( argv , serviceId ) ; if ( ! serviceId [ 0 ] . equals ( "" ) ) { NameNode . checkServiceName ( conf , serviceId [ 0 ] ) ; DFSUtil . setGenericConf ( conf , serviceId [ 0 ] , AvatarNode . AVATARSERVICE_SPECIFIC_KEYS ) ; NameNode . setupDefaultURI ( conf ) ; } return filteredArgv ; }
Adjust configuration for nameservice keys .
32,082
/**
 * Counts how many of the given companion blocks live on each datanode and on
 * each rack.
 *
 * @param companionBlocks blocks whose replica locations are tallied
 * @return a two-element array: [0] counts keyed by datanode name, [1] counts
 *         keyed by the datanode's parent (rack) name
 */
@ SuppressWarnings ( "unchecked" ) static Map < String , Integer > [ ] countCompanionBlocks ( Collection < LocatedBlock > companionBlocks ) {
  Map < String , Integer > [ ] counts = new HashMap [ 2 ] ;
  counts [ 0 ] = new HashMap < String , Integer > ( ) ;
  counts [ 1 ] = new HashMap < String , Integer > ( ) ;
  for ( LocatedBlock block : companionBlocks ) {
    for ( DatanodeInfo node : block . getLocations ( ) ) {
      // Tally per datanode.
      String nodeName = node . getName ( ) ;
      Integer nodeCount = counts [ 0 ] . get ( nodeName ) ;
      counts [ 0 ] . put ( nodeName , nodeCount == null ? 1 : nodeCount + 1 ) ;
      // Tally per rack (the node's parent in the topology).
      String rackName = node . getParent ( ) . getName ( ) ;
      Integer rackCount = counts [ 1 ] . get ( rackName ) ;
      counts [ 1 ] . put ( rackName , rackCount == null ? 1 : rackCount + 1 ) ;
    }
  }
  return counts ;
}
Count how many companion blocks are on each datanode or the each rack
32,083
/**
 * Returns the companion blocks of the given block — the blocks that can help
 * recover each other via the raid decoder. Dispatches on the file's raid type:
 * nothing for non-raid files, and the appropriate stripe lookup for har-temp
 * parity, temp parity, final parity, and source files.
 *
 * @throws IOException if the companion lookup fails
 */
List < LocatedBlock > getCompanionBlocks ( String path , FileInfo info , Block block , FSInodeInfo inode ) throws IOException { Codec codec = info . codec ; switch ( info . type ) { case NOT_RAID : return Collections . emptyList ( ) ; case HAR_TEMP_PARITY : return getCompanionBlocksForHarParityBlock ( path , codec . parityLength , block , inode ) ; case TEMP_PARITY : NameWithINode ni = getSourceFile ( path , codec . tmpParityDirectory ) ; return getCompanionBlocksForParityBlock ( ni . name , path , codec . parityLength , codec . stripeLength , block , codec . isDirRaid , ni . inode , inode ) ; case PARITY : ni = getSourceFile ( path , codec . parityDirectory ) ; return getCompanionBlocksForParityBlock ( ni . name , path , codec . parityLength , codec . stripeLength , block , codec . isDirRaid , ni . inode , inode ) ; case SOURCE : return getCompanionBlocksForSourceBlock ( path , info . parityName , codec . parityLength , codec . stripeLength , block , codec . isDirRaid , inode , info . parityInode ) ; } return Collections . emptyList ( ) ; }
Obtain the companion blocks of the give block Companion blocks are defined as the blocks that can help recover each others by using raid decoder .
32,084
/**
 * Resolves the source file corresponding to a parity path by stripping the
 * parity-directory prefix and looking up the inode in the namesystem.
 * Returns null for har parity files; the returned inode may be null if the
 * source no longer exists.
 *
 * @param parity full parity path
 * @param prefix the parity directory prefix to strip
 */
NameWithINode getSourceFile ( String parity , String prefix ) throws IOException { if ( isHarFile ( parity ) ) { return null ; } String src = parity . substring ( prefix . length ( ) ) ; byte [ ] [ ] components = INodeDirectory . getPathComponents ( src ) ; INode inode = namesystem . dir . getINode ( components ) ; return new NameWithINode ( src , inode ) ; }
Get path for the corresponding source file for a valid parity file . Returns null if it does not exists
32,085
/**
 * Resolves the parity path for a source path under the given codec: for
 * directory-raid codecs the parity is keyed by the source's parent directory,
 * otherwise by the source itself. Returns null when no parity inode exists.
 */
private NameWithINode getParityFile ( Codec codec , String src ) throws IOException { String parity ; if ( codec . isDirRaid ) { String parent = getParentPath ( src ) ; parity = codec . parityDirectory + parent ; } else { parity = codec . parityDirectory + src ; } byte [ ] [ ] components = INodeDirectory . getPathComponents ( parity ) ; INode parityInode = namesystem . dir . getINode ( components ) ; if ( parityInode == null ) return null ; return new NameWithINode ( parity , parityInode ) ; }
Get path for the parity file . Returns null if it does not exists
32,086
/**
 * Classifies a path's raid role. For each codec, checks whether the path lies
 * in the codec's har-temp, temp-parity, or parity directory; otherwise, if a
 * parity file exists for it, the path is a SOURCE (except dir-raid sources
 * smaller than minFileSize, which are treated as NOT_RAID). Falls through to
 * NOT_RAID when no codec matches.
 *
 * @param srcINode the path's inode (may be null); used for the size check
 * @param path the path being classified
 */
protected FileInfo getFileInfo ( FSInodeInfo srcINode , String path ) throws IOException { for ( Codec c : Codec . getCodecs ( ) ) { if ( path . startsWith ( c . tmpHarDirectoryPS ) ) { return new FileInfo ( FileType . HAR_TEMP_PARITY , c ) ; } if ( path . startsWith ( c . tmpParityDirectoryPS ) ) { return new FileInfo ( FileType . TEMP_PARITY , c ) ; } if ( path . startsWith ( c . parityDirectoryPS ) ) { return new FileInfo ( FileType . PARITY , c ) ; } NameWithINode ni = getParityFile ( c , path ) ; if ( ni != null ) { if ( c . isDirRaid && srcINode != null && srcINode instanceof INodeFile ) { INodeFile inf = ( INodeFile ) srcINode ; if ( inf . getFileSize ( ) < this . minFileSize ) { return new FileInfo ( FileType . NOT_RAID , null ) ; } } return new FileInfo ( FileType . SOURCE , c , ni . name , ni . inode ) ; } } return new FileInfo ( FileType . NOT_RAID , null ) ; }
Return raid information about a file for example if this file is the source file parity file or not raid
32,087
/**
 * Reports whether every subcontext is currently monitoring. Every subcontext
 * is queried (no short-circuit), matching the original non-short-circuit
 * accumulation.
 *
 * @return true only if all subcontexts report monitoring
 */
public boolean isMonitoring ( ) {
  boolean allMonitoring = true ;
  for ( MetricsContext context : subctxt ) {
    // Non-short-circuit '&' so each subcontext is queried regardless.
    allMonitoring = allMonitoring & context . isMonitoring ( ) ;
  }
  return allMonitoring ;
}
Return true if all subcontexts are monitoring .
32,088
/**
 * Reads a single byte from the circular buffer, blocking (polling with short
 * sleeps) until data is available. Returns the byte as an unsigned int, or -1
 * when the buffer is closed and drained.
 *
 * @throws IOException propagated from the sleep/interruption handling
 */
public int read ( ) throws IOException { while ( true ) { lockR . lock ( ) ; try { if ( availableCount . get ( ) > 0 ) { int b = bytes [ readCursor ] & 0xFF ; incReadCursor ( 1 ) ; availableCount . decrementAndGet ( ) ; totalRead ++ ; return b ; } else if ( closed ) { return - 1 ; } } finally { lockR . unlock ( ) ; } sleep ( 1 ) ; } }
Read a single byte . Blocks until data is available or fail if buffer is closed .
32,089
/**
 * Reads up to len bytes into buf at offset off, handling wrap-around of the
 * circular buffer (a forward copy to the end of the array, then a copy from
 * the start for the remainder). Blocks until data is available unless
 * nonBlockingRead is set (then returns 0); returns -1 once closed and drained.
 *
 * @return number of bytes read, 0 (non-blocking, no data), or -1 (closed)
 * @throws IOException propagated from the sleep/interruption handling
 */
public int read ( byte [ ] buf , int off , int len ) throws IOException { while ( true ) { lockR . lock ( ) ; try { int available = availableCount . get ( ) ; if ( available > 0 ) { final int lenToRead = Math . min ( available , len ) ; final int lenForward = Math . min ( lenToRead , length - readCursor ) ; final int lenRemaining = lenToRead - lenForward ; if ( lenForward > 0 ) { System . arraycopy ( bytes , readCursor , buf , off , lenForward ) ; incReadCursor ( lenForward ) ; } if ( lenRemaining > 0 ) { System . arraycopy ( bytes , 0 , buf , off + lenForward , lenRemaining ) ; incReadCursor ( lenRemaining ) ; } availableCount . addAndGet ( - 1 * lenToRead ) ; totalRead += lenToRead ; return lenToRead ; } else if ( nonBlockingRead ) { return 0 ; } else if ( closed ) { return - 1 ; } } finally { lockR . unlock ( ) ; } sleep ( 1 ) ; } }
Read data to the buffer starting at offset off . Will block until data available or the input has been closed .
32,090
/**
 * Writes len bytes from buf (starting at off) into the circular buffer,
 * blocking until all bytes have been written. Each pass writes as much as
 * currently fits, handling wrap-around with two arraycopy calls, then sleeps
 * briefly if bytes remain. Fails if the buffer has been closed.
 *
 * @throws IOException if the buffer is closed
 */
public void write ( byte [ ] buf , int off , int len ) throws IOException { while ( true ) { lockW . lock ( ) ; try { checkClosed ( ) ; final int lenToWrite = Math . min ( len , length - availableCount . get ( ) ) ; final int lenForward = Math . min ( lenToWrite , length - writeCursor ) ; final int lenRemaining = lenToWrite - lenForward ; if ( lenForward > 0 ) { System . arraycopy ( buf , off , bytes , writeCursor , lenForward ) ; incWriteCursor ( lenForward ) ; } if ( lenRemaining > 0 ) { System . arraycopy ( buf , off + lenForward , bytes , 0 , lenRemaining ) ; incWriteCursor ( lenRemaining ) ; } availableCount . addAndGet ( lenToWrite ) ; totalWritten += lenToWrite ; off += lenToWrite ; len -= lenToWrite ; if ( len == 0 ) { return ; } } finally { lockW . unlock ( ) ; } sleep ( 1 ) ; } }
Write buf to the buffer will block until it can write len bytes . Will fail if buffer is closed .
32,091
/**
 * Writes a single byte to the circular buffer, blocking (polling with short
 * sleeps) until space is available. Fails if the buffer has been closed.
 *
 * @param b the byte to write (low 8 bits are stored)
 * @throws IOException if the buffer is closed
 */
public void write ( int b ) throws IOException { while ( true ) { lockW . lock ( ) ; try { checkClosed ( ) ; if ( length - availableCount . get ( ) > 0 ) { bytes [ writeCursor ] = ( byte ) b ; incWriteCursor ( 1 ) ; availableCount . incrementAndGet ( ) ; totalWritten ++ ; return ; } } finally { lockW . unlock ( ) ; } sleep ( 1 ) ; } }
Write a single byte to the buffer . Will block until the byte can be written or fail when buffer is closed .
32,092
/**
 * Builds the JobConf for a DistRaid run from the given configuration: sets the
 * raid user, a timestamped job name, disables map speculation, applies any
 * scheduler options, and wires the DistRaid input format and mapper with zero
 * reduce tasks.
 */
private static JobConf createJobConf ( Configuration conf ) { JobConf jobconf = new JobConf ( conf , DistRaid . class ) ; jobName = NAME + " " + dateForm . format ( new Date ( RaidNode . now ( ) ) ) ; jobconf . setUser ( RaidNode . JOBUSER ) ; jobconf . setJobName ( jobName ) ; jobconf . setMapSpeculativeExecution ( false ) ; RaidUtils . parseAndSetOptions ( jobconf , SCHEDULER_OPTION_LABEL ) ; jobconf . setJarByClass ( DistRaid . class ) ; jobconf . setInputFormat ( DistRaidInputFormat . class ) ; jobconf . setOutputKeyClass ( Text . class ) ; jobconf . setOutputValueClass ( Text . class ) ; jobconf . setMapperClass ( DistRaidMapper . class ) ; jobconf . setNumReduceTasks ( 0 ) ; return jobconf ; }
create new job conf based on configuration passed .
32,093
/**
 * Queues encoding candidates to be raided under the given policy; they are
 * consumed when the distributed raid job is set up.
 */
public void addRaidPaths ( PolicyInfo info , List < EncodingCandidate > paths ) { raidPolicyPathPairList . add ( new RaidPolicyPathPair ( info , paths ) ) ; }
Add paths to be raided
32,094
/**
 * Launches the map-reduce job that performs parallel raiding of the queued
 * policy/path pairs. Requires at least one pair to have been queued.
 *
 * @return true if the job was set up and submitted; false if setup produced no work
 * @throws IOException if job submission fails
 */
public boolean startDistRaid ( ) throws IOException { assert ( raidPolicyPathPairList . size ( ) > 0 ) ; if ( setup ( ) ) { this . jobClient = new JobClient ( jobconf ) ; this . runningJob = this . jobClient . submitJob ( jobconf ) ; LOG . info ( "Job Started: " + runningJob . getID ( ) ) ; this . startTime = System . currentTimeMillis ( ) ; return true ; } return false ; }
Invokes a map - reduce job do parallel raiding .
32,095
/**
 * Polls the running distributed-raid job. When complete: deletes the job
 * directory, logs success/failure, clears the queued pairs, and folds the job
 * counters (failed files, slot-seconds) into the RaidNode metrics. While still
 * running: logs progress when it changes and logs any failed task attempts.
 *
 * @return true once the job has completed
 * @throws IOException on job-status or filesystem errors
 */
public boolean checkComplete ( ) throws IOException { JobID jobID = runningJob . getID ( ) ; if ( runningJob . isComplete ( ) ) { final String jobdir = jobconf . get ( JOB_DIR_LABEL ) ; if ( jobdir != null ) { final Path jobpath = new Path ( jobdir ) ; jobpath . getFileSystem ( jobconf ) . delete ( jobpath , true ) ; } if ( runningJob . isSuccessful ( ) ) { LOG . info ( "Job Complete(Succeeded): " + jobID ) ; } else { LOG . info ( "Job Complete(Failed): " + jobID ) ; } raidPolicyPathPairList . clear ( ) ; Counters ctrs = runningJob . getCounters ( ) ; if ( ctrs != null ) { RaidNodeMetrics metrics = RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) ; if ( ctrs . findCounter ( Counter . FILES_FAILED ) != null ) { long filesFailed = ctrs . findCounter ( Counter . FILES_FAILED ) . getValue ( ) ; metrics . raidFailures . inc ( filesFailed ) ; } long slotSeconds = ctrs . findCounter ( JobInProgress . Counter . SLOTS_MILLIS_MAPS ) . getValue ( ) / 1000 ; metrics . raidSlotSeconds . inc ( slotSeconds ) ; } return true ; } else { String report = ( " job " + jobID + " map " + StringUtils . formatPercent ( runningJob . mapProgress ( ) , 0 ) + " reduce " + StringUtils . formatPercent ( runningJob . reduceProgress ( ) , 0 ) ) ; if ( ! report . equals ( lastReport ) ) { LOG . info ( report ) ; lastReport = report ; } TaskCompletionEvent [ ] events = runningJob . getTaskCompletionEvents ( jobEventCounter ) ; jobEventCounter += events . length ; for ( TaskCompletionEvent event : events ) { if ( event . getTaskStatus ( ) == TaskCompletionEvent . Status . FAILED ) { LOG . info ( " Job " + jobID + " " + event . toString ( ) ) ; } } return false ; } }
Checks if the map - reduce job has completed .
32,096
/**
 * Sets up the map-reduce job: writes the list of files to raid into a
 * SequenceFile (read by the mappers), stores its location in the job
 * configuration, and sizes the map task count from the number of entries.
 *
 * @return true if there is at least one file to raid, false otherwise
 * @throws IOException if the operation list cannot be written
 */
private boolean setup() throws IOException {
  estimateSavings();
  final String randomId = getRandomId();
  JobClient jClient = new JobClient(jobconf);
  Path jobdir = new Path(jClient.getSystemDir(), NAME + "_" + randomId);
  LOG.info(JOB_DIR_LABEL + "=" + jobdir);
  jobconf.set(JOB_DIR_LABEL, jobdir.toString());
  Path log = new Path(jobdir, "_logs");
  // NOTE(review): "dfs.blocks.size" looks like a typo for the standard
  // "dfs.block.size" key; if so, this setting is silently ignored. Confirm
  // before changing, since the string is runtime-visible configuration.
  jobconf.setInt("dfs.blocks.size", OP_LIST_BLOCK_SIZE);
  FileOutputFormat.setOutputPath(jobconf, log);
  LOG.info("log=" + log);
  FileSystem fs = jobdir.getFileSystem(jobconf);
  Path opList = new Path(jobdir, "_" + OP_LIST_LABEL);
  jobconf.set(OP_LIST_LABEL, opList.toString());
  int opCount = 0, synCount = 0;
  SequenceFile.Writer opWriter = null;
  try {
    opWriter = SequenceFile.createWriter(fs, jobconf, opList, Text.class,
        PolicyInfo.class, SequenceFile.CompressionType.NONE);
    for (RaidPolicyPathPair p : raidPolicyPathPairList) {
      // Shuffle so each mapper receives a roughly even mix of paths.
      java.util.Collections.shuffle(p.srcPaths);
      for (EncodingCandidate ec : p.srcPaths) {
        opWriter.append(new Text(ec.toString()), p.policy);
        opCount++;
        if (++synCount > SYNC_FILE_MAX) {
          opWriter.sync();
          synCount = 0;
        }
      }
    }
  } finally {
    if (opWriter != null) {
      opWriter.close();
    }
  }
  // BUGFIX: setReplication used to run inside the finally block, where it
  // could throw (e.g. opList never created because createWriter failed) and
  // mask the original exception. Run it only after a successful write.
  fs.setReplication(opList, OP_LIST_REPLICATION);
  raidPolicyPathPairList.clear();
  jobconf.setInt(OP_COUNT_LABEL, opCount);
  LOG.info("Number of files=" + opCount);
  jobconf.setNumMapTasks(getMapCount(opCount));
  LOG.info("jobName= " + jobName + " numMapTasks=" + jobconf.getNumMapTasks());
  return opCount != 0;
}
Sets up the input file, which contains the list of input files to raid.
32,097
/**
 * Extracts String representations from an arbitrary Object.
 *
 * Handles null ("null"), plain Strings, Calendars (formatted with the
 * instance's dateFormatter), InetAddress arrays (one host address per
 * element), and String arrays; anything else falls back to toString().
 *
 * @param o the object to convert; may be null
 * @return a list with one String per represented value (never empty)
 */
private ArrayList<String> getStrings(Object o) {
  // BUGFIX(cleanup): removed the dead retval.clear() that immediately
  // followed construction of the (already empty) list.
  ArrayList<String> retval = new ArrayList<String>();
  if (o == null) {
    retval.add("null");
  } else if (o instanceof String) {
    retval.add((String) o);
  } else if (o instanceof Calendar) {
    retval.add(dateFormatter.format(((Calendar) o).getTime()));
  } else if (o instanceof InetAddress[]) {
    for (InetAddress ip : (InetAddress[]) o) {
      retval.add(ip.getHostAddress());
    }
  } else if (o instanceof String[]) {
    for (String s : (String[]) o) {
      retval.add(s);
    }
  } else {
    retval.add(o.toString());
  }
  return retval;
}
Extract String representations from an Object .
32,098
/**
 * Creates a {@link ClientProtocol} proxy to the NameNode, delegating to the
 * address-taking overload with the client-protocol address read from the
 * given configuration.
 *
 * @param conf configuration used to locate the NameNode
 * @return a ClientProtocol proxy to the NameNode
 * @throws IOException if the proxy cannot be created
 */
public static ClientProtocol createNamenode(Configuration conf) throws IOException {
  return createNamenode(NameNode.getClientProtocolAddress(conf), conf);
}
Creates a ClientProtocol proxy to the NameNode using the client protocol address obtained from the configuration. (The previous description — about the DFSClient / leasechecker / DFSOutputStream locking hierarchy — belongs to the enclosing class, not this method.)
32,099
/**
 * Creates the RPC proxy to the NameNode, tolerating a version mismatch when
 * the protocols are known to be compatible.
 *
 * On a version mismatch: a client newer than the server that is NOT listed
 * as compatible raises RPC.VersionIncompatible; otherwise the proxy carried
 * by the mismatch exception is used.
 *
 * @throws IOException if the proxy cannot be created or versions are
 *         incompatible
 */
private void createRPCNamenodeIfCompatible(InetSocketAddress nameNodeAddr,
    Configuration conf, UserGroupInformation ugi) throws IOException {
  try {
    this.namenodeProtocolProxy =
        createRPCNamenode(nameNodeAddr, conf, ugi, namenodeRPCSocketTimeout);
    this.rpcNamenode = namenodeProtocolProxy.getProxy();
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    namenodeVersion = e.getServerVersion();
    boolean clientIsNewer = clientVersion > namenodeVersion;
    if (clientIsNewer && !ProtocolCompatible.isCompatibleClientProtocol(
        clientVersion, namenodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientProtocol.class.getName(), clientVersion, namenodeVersion);
    }
    // Versions differ but are compatible: use the server-supplied proxy.
    this.rpcNamenode = (ClientProtocol) e.getProxy();
  }
}
Creates a NameNode proxy for the client if the client and NameNode protocol versions are compatible.