idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,700
// Emits the TRANSACTION_ID element, but only for edit-log layout versions
// that actually store txids (Feature.STORED_TXIDS); older logs carry none.
private void visitTxId() throws IOException {
  if (LayoutVersion.supports(Feature.STORED_TXIDS, editsVersion)) {
    v.visitLong(EditsElement.TRANSACTION_ID);
  }
}
Visit a transaction ID if the log version supports it .
32,701
// Visits an OP_ADD or OP_CLOSE record. The two opcodes share the same wire
// layout; only the trailing client fields (emitted for OP_ADD alone) differ.
// The visit order below must match the on-disk field order exactly.
private void visit_OP_ADD_or_OP_CLOSE(FSEditLogOpCodes editsOpCode) throws IOException {
  visitTxId();
  if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, editsVersion)) {
    // Pre-optimization logs carry an explicit record length; zero length
    // indicates a corrupted edits file.
    IntToken opAddLength = v.visitInt(EditsElement.LENGTH);
    if (opAddLength.value == 0) {
      throw new IOException("OpCode " + editsOpCode + " has zero length (corrupted edits)");
    }
  }
  v.visitStringUTF8(EditsElement.PATH);
  if (LayoutVersion.supports(Feature.ADD_INODE_ID, editsVersion)) {
    v.visitLong(EditsElement.INODE_ID);
  }
  if (LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, editsVersion)) {
    // Optimized encoding stores these fields as binary numbers ...
    v.visitShort(EditsElement.REPLICATION);
    v.visitLong(EditsElement.MTIME);
    v.visitLong(EditsElement.ATIME);
    v.visitLong(EditsElement.BLOCKSIZE);
  } else {
    // ... while older logs store the same fields as UTF8 strings.
    v.visitStringUTF8(EditsElement.REPLICATION);
    v.visitStringUTF8(EditsElement.MTIME);
    v.visitStringUTF8(EditsElement.ATIME);
    v.visitStringUTF8(EditsElement.BLOCKSIZE);
  }
  visit_Blocks();
  v.visitEnclosingElement(EditsElement.PERMISSION_STATUS);
  v.visitStringText(EditsElement.USERNAME);
  v.visitStringText(EditsElement.GROUPNAME);
  v.visitShort(EditsElement.FS_PERMISSIONS);
  v.leaveEnclosingElement();
  if (editsOpCode == FSEditLogOpCodes.OP_ADD) {
    // Only OP_ADD records the lease holder and its machine.
    v.visitStringUTF8(EditsElement.CLIENT_NAME);
    v.visitStringUTF8(EditsElement.CLIENT_MACHINE);
  }
}
Visits an OP_ADD or OP_CLOSE record; the two opcodes share almost the same layout.
32,702
// Loads the edits file, driving the visitor over every element. Records are
// read until an OP_INVALID opcode is seen; an EOF while reading an opcode is
// converted into a synthetic OP_INVALID token so truncated logs terminate
// cleanly. On any IOException the visitor is told to finish abnormally and
// the exception is rethrown.
public void loadEdits() throws IOException {
  try {
    v.start();
    v.visitEnclosingElement(EditsElement.EDITS);
    IntToken editsVersionToken = v.visitInt(EditsElement.EDITS_VERSION);
    editsVersion = editsVersionToken.value;
    if (!canLoadVersion(editsVersion)) {
      throw new IOException("Cannot process editLog version " + editsVersionToken.value);
    }
    FSEditLogOpCodes editsOpCode;
    do {
      v.visitEnclosingElement(EditsElement.RECORD);
      ByteToken opCodeToken;
      try {
        opCodeToken = v.visitByte(EditsElement.OPCODE);
      } catch (EOFException eof) {
        // Truncated file: fabricate an OP_INVALID token so the loop exits.
        opCodeToken = new ByteToken(EditsElement.OPCODE);
        opCodeToken.fromByte(FSEditLogOpCodes.OP_INVALID.getOpCode());
        v.visit(opCodeToken);
      }
      editsOpCode = FSEditLogOpCodes.fromByte(opCodeToken.value);
      v.visitEnclosingElement(EditsElement.DATA);
      visitOpCode(editsOpCode);
      v.leaveEnclosingElement();
      // "EDITS_CHESKUM" is the Feature enum's own (misspelled) constant name;
      // it gates the per-record checksum field.
      if (editsOpCode != FSEditLogOpCodes.OP_INVALID && LayoutVersion.supports(Feature.EDITS_CHESKUM, editsVersion)) {
        v.visitInt(EditsElement.CHECKSUM);
      }
      v.leaveEnclosingElement();
    } while (editsOpCode != FSEditLogOpCodes.OP_INVALID);
    v.leaveEnclosingElement();
    v.finish();
  } catch (IOException e) {
    v.finishAbnormally();
    throw e;
  }
}
Loads the edits file, using the visitor to process all of its elements.
32,703
// Loads the resource bundle named after an enum class. '$' (used in nested
// class names) is not valid in a bundle name, so it is mapped to '_'.
// Propagates MissingResourceException from ResourceBundle.getBundle when
// no such bundle exists.
private static ResourceBundle getResourceBundle(String enumClassName) {
  String bundleName = enumClassName.replace('$', '_');
  return ResourceBundle.getBundle(bundleName);
}
Returns the specified resource bundle or throws an exception .
32,704
// Looks up a counter by internal name within this group, lazily creating
// and registering it (under the given display name) on first use.
protected Counter findCounter(String counterName, String displayName) {
  Counter existing = counters.get(counterName);
  if (existing != null) {
    return existing;
  }
  Counter created = new Counter(counterName, displayName);
  counters.put(counterName, created);
  return created;
}
Internal to find a counter in a group .
32,705
// Resolves key through the ResourceBundle; returns defaultValue when the
// bundle is absent or does not define the key.
private String localize(String key, String defaultValue) {
  if (bundle == null) {
    return defaultValue;
  }
  try {
    return bundle.getString(key);
  } catch (MissingResourceException ignored) {
    // Key not present in the bundle: fall back to the caller's default.
    return defaultValue;
  }
}
Looks up key in the ResourceBundle and returns the corresponding value. If the bundle or the key doesn't exist, returns the default value.
32,706
// Returns the authority (host:port) of fs.default.name from the
// configuration, suitable for znode-name construction without a DNS lookup.
// Throws IllegalArgumentException when the default URI has no authority.
public static String getDefaultAddress(Configuration conf) {
  URI uri = FileSystem.getDefaultUri(conf);
  String authority = uri.getAuthority();
  if (authority == null) {
    throw new IllegalArgumentException(String.format("Invalid URI for NameNode address (check %s): %s has no authority.", FileSystem.FS_DEFAULT_NAME_KEY, uri.toString()));
  }
  return authority;
}
Returns the fs . default name from the configuration as a string for znode name retrieval without a DNS lookup .
32,707
// Initializes the name-node: installs the service-authorization policy when
// enabled, verifies the client/datanode/HTTP addresses are bindable,
// fingerprints the ClientProtocol methods, then brings up metrics, the
// namesystem, the RPC servers and the HTTP server — in that order.
protected void initialize() throws IOException {
  // Note: this is an intentional assignment-inside-if; it both sets the
  // serviceAuthEnabled field and branches on it.
  if (serviceAuthEnabled = getConf().getBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, false)) {
    PolicyProvider policyProvider = (PolicyProvider) (ReflectionUtils.newInstance(getConf().getClass(PolicyProvider.POLICY_PROVIDER_CONFIG, HDFSPolicyProvider.class, PolicyProvider.class), getConf()));
    SecurityUtil.setPolicy(new ConfiguredPolicy(getConf(), policyProvider));
  }
  // Fail fast if any of the configured listen addresses cannot be bound.
  NetUtils.isSocketBindable(getClientProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getDNProtocolAddress(getConf()));
  NetUtils.isSocketBindable(getHttpServerAddress(getConf()));
  long serverVersion = ClientProtocol.versionID;
  this.clientProtocolMethodsFingerprint = ProtocolSignature.getMethodsSigFingerPrint(ClientProtocol.class, serverVersion);
  myMetrics = new NameNodeMetrics(getConf(), this);
  this.clusterName = getConf().get(FSConstants.DFS_CLUSTER_NAME);
  this.namesystem = new FSNamesystem(this, getConf());
  // JSP pages reach the namesystem through this static field.
  JspHelper.fsn = this.namesystem;
  this.startDNServer();
  startHttpServer(getConf());
}
Initialize name - node .
32,708
// Appends the service name to each metadata directory configured for this
// namenode (name, edits, checkpoint and checkpoint-edits directories).
protected static void adjustMetaDirectoryNames(Configuration conf, String serviceKey) {
  adjustMetaDirectoryName(conf, DFS_NAMENODE_NAME_DIR_KEY, serviceKey);
  adjustMetaDirectoryName(conf, DFS_NAMENODE_EDITS_DIR_KEY, serviceKey);
  adjustMetaDirectoryName(conf, DFS_NAMENODE_CHECKPOINT_DIR_KEY, serviceKey);
  adjustMetaDirectoryName(conf, DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, serviceKey);
}
Append service name to each meta directory name
32,709
// Quiesces all RPC communication to the namenode. First signals both the
// client and datanode servers to stop (optionally interrupting client
// handlers), then waits for each to fully shut down so all handlers have
// exited before this method returns.
protected void stopRPC(boolean interruptClientHandlers) throws IOException, InterruptedException {
  stopRPCInternal(server, "client", interruptClientHandlers);
  stopRPCInternal(dnProtocolServer, "datanode", interruptClientHandlers);
  stopWaitRPCInternal(server, "client");
  stopWaitRPCInternal(dnProtocolServer, "datanode");
}
Quiesces all communication to the namenode cleanly. Ensures all RPC handlers have exited.
32,710
// Stops all NameNode subsystems: HTTP server, namesystem, trash emptier,
// RPC servers, metrics and finally the namesystem MBeans. Idempotent —
// subsequent calls return immediately once stopRequested is set.
public void stop() {
  if (stopRequested)
    return;
  stopRequested = true;
  LOG.info("Stopping http server");
  try {
    if (httpServer != null)
      httpServer.stop();
  } catch (Exception e) {
    // Shutdown is best-effort; log and keep tearing down the rest.
    LOG.error(StringUtils.stringifyException(e));
  }
  LOG.info("Stopping namesystem");
  if (namesystem != null)
    namesystem.close();
  LOG.info("Stopping emptier");
  if (emptier != null)
    emptier.interrupt();
  LOG.info("Stopping rpc servers");
  if (server != null)
    server.stop();
  if (dnProtocolServer != null)
    dnProtocolServer.stop();
  LOG.info("Stopping metrics");
  if (myMetrics != null) {
    myMetrics.shutdown();
  }
  LOG.info("Stopping namesystem mbeans");
  if (namesystem != null) {
    namesystem.shutdown();
  }
}
Stop all NameNode threads and wait for all to finish .
32,711
// Back-compat overload for 0.20-era clients that predate HDFS-630 and
// cannot supply an excluded-datanodes list; delegates with null excludes.
public LocatedBlock addBlock(String src, String clientName) throws IOException {
  return addBlock(src, clientName, null);
}
Stub for 0.20 clients that don't support HDFS-630.
32,712
// Called when the client gives up on a block it was writing; delegates to
// the internal implementation.
public void abandonBlock(Block b, String src, String holder) throws IOException {
  abandonBlockInternal(b, src, holder);
}
The client needs to give up on the block .
32,713
// Datanode liveness heartbeat. Verifies the registration, bumps the
// heartbeat metric, and returns the commands (transfer/delete) the
// namesystem wants the datanode to execute.
// NOTE(review): the delegation passes xceiverCount before xmitsInProgress,
// the reverse of this method's parameter order — presumably matching
// handleHeartbeat's signature; confirm against FSNamesystem.
public DatanodeCommand[] sendHeartbeat(DatanodeRegistration nodeReg, long capacity, long dfsUsed, long remaining, long namespaceUsed, int xmitsInProgress, int xceiverCount) throws IOException {
  verifyRequest(nodeReg);
  myMetrics.numHeartbeat.inc();
  return namesystem.handleHeartbeat(nodeReg, capacity, dfsUsed, remaining, namespaceUsed, xceiverCount, xmitsInProgress);
}
A data node notifies the name node that it is alive. Returns an array of block-oriented commands for the datanode to execute — either a transfer or a delete operation.
32,714
// Processes a datanode's report of blocks currently being written, handing
// the long-encoded block list to the namesystem. Logs the report, noting
// when the namesystem discarded it.
public void blocksBeingWrittenReport(DatanodeRegistration nodeReg, BlockReport blocks) throws IOException {
  verifyRequest(nodeReg);
  long[] blocksAsLong = blocks.getBlockReportInLongs();
  BlockListAsLongs blist = new BlockListAsLongs(blocksAsLong);
  boolean processed = namesystem.processBlocksBeingWrittenReport(nodeReg, blist);
  String message = "*BLOCK* NameNode.blocksBeingWrittenReport: " + "from " + nodeReg.getName() + " " + blist.getNumberOfBlocks() + " blocks";
  if (!processed) {
    message += " was discarded.";
  }
  stateChangeLog.info(message);
}
Adds new replica blocks to the inode-to-target mapping, and also adds the inode file to the DatanodeDescriptor.
32,715
// Verifies a datanode request: the layout version must match, the
// namespace id must be ours, and the datanode's cTime must not be newer
// than the namenode's. Mismatches are logged and rejected with
// UnregisteredDatanodeException.
// NOTE(review): on success this increments numVersionRequest — verify that
// is the intended metric for request verification.
public void verifyRequest(DatanodeRegistration nodeReg) throws IOException {
  verifyVersion(nodeReg.getVersion(), LAYOUT_VERSION, "layout");
  if (getNamespaceID() != nodeReg.storageInfo.namespaceID || getCTime() < nodeReg.storageInfo.cTime) {
    LOG.warn("Invalid Request : NN namespaceId, cTime : " + getNamespaceID() + ", " + getCTime() + " DN namespaceId, cTime : " + nodeReg.storageInfo.namespaceID + ", " + nodeReg.storageInfo.cTime);
    throw new UnregisteredDatanodeException(nodeReg);
  }
  myMetrics.numVersionRequest.inc();
}
Verify request .
32,716
// Verifies a reported layout version against the expected one. The XOR
// test rejects a sign mismatch (the two sides disagree on the versioning
// scheme entirely); then a smaller magnitude means the datanode is running
// an older, incompatible layout.
public static void verifyVersion(int reportedVersion, int expectedVersion, String annotation) throws IOException {
  if ((reportedVersion ^ expectedVersion) < 0) {
    // XOR of two ints is negative iff exactly one operand is negative.
    throw new IOException("reportedVersion and expectedVersion have" + " different signs : " + reportedVersion + ", " + expectedVersion);
  }
  if (Math.abs(reportedVersion) < Math.abs(expectedVersion))
    throw new IncorrectVersionException(reportedVersion, "data node " + annotation, expectedVersion);
}
Verify version .
32,717
// Returns a random sample of the filesystem's files. percentage must be in
// (0, 1.0]; anything else raises IllegalArgumentException.
public List<FileStatusExtended> getRandomFilesSample(double percentage) {
  if (!(percentage > 0 && percentage <= 1.0)) {
    throw new IllegalArgumentException("Invalid percentage : " + percentage + " value should be between (0 - 1.0]");
  }
  LOG.info("Sampling : " + (percentage * 100) + " percent of files");
  return namesystem.getRandomFiles(percentage);
}
Get a sample of the total files in the FileSystem . The sampling is done randomly .
32,718
// Formats the configured name and edits directories, optionally after
// interactive confirmation. Refuses entirely when
// dfs.namenode.support.allowformat is false.
// Return convention (note the inversion): true means formatting was
// ABORTED (confirmation declined), false means the format succeeded.
static boolean format(Configuration conf, boolean force, boolean isConfirmationNeeded) throws IOException {
  boolean allowFormat = conf.getBoolean("dfs.namenode.support.allowformat", true);
  if (!allowFormat) {
    throw new IOException("The option dfs.namenode.support.allowformat is " + "set to false for this filesystem, so it " + "cannot be formatted. You will need to set " + "dfs.namenode.support.allowformat parameter " + "to true in order to format this filesystem");
  }
  Collection<URI> dirsToFormat = NNStorageConfiguration.getNamespaceDirs(conf);
  Collection<URI> editDirsToFormat = NNStorageConfiguration.getNamespaceEditsDirs(conf);
  FSNamesystem nsys = new FSNamesystem(new FSImage(conf, dirsToFormat, editDirsToFormat, null), conf);
  try {
    if (!nsys.dir.fsImage.confirmFormat(force, isConfirmationNeeded)) {
      return true;
    }
    nsys.dir.fsImage.format();
    return false;
  } finally {
    nsys.close();
  }
}
Verify that the configured directories exist, then interactively confirm that formatting is desired for each existing directory, and format them.
32,719
// Validates a nameservice id against the configuration. With federation
// configured (non-empty id set), the id must be supplied and must be one of
// the configured ids; without federation, no id may be supplied at all.
// Prints the reason to stderr and returns false on failure.
public static boolean validateServiceName(Configuration conf, String nameServiceId) {
  Collection<String> nameserviceIds = DFSUtil.getNameServiceIds(conf);
  if (nameserviceIds != null && !nameserviceIds.isEmpty()) {
    if (nameServiceId == null) {
      System.err.println("Need to input a nameservice id");
      return false;
    } else if (!nameserviceIds.contains(nameServiceId)) {
      System.err.println("An invalid nameservice id: " + nameServiceId);
      return false;
    }
  } else if (nameServiceId != null) {
    System.err.println("An invalid nameservice id: " + nameServiceId);
    return false;
  }
  return true;
}
Validate that the input service name is valid.
32,720
// Ensures the RPC caller's address matches the configured checkpointer
// address. A null remote address is rejected; a null or wildcard (0.0.0.0)
// configured address disables the check entirely.
protected void validateCheckpointerAddress(InetAddress configuredRemoteAddress) throws IOException {
  InetAddress remoteAddress = Server.getRemoteIp();
  InjectionHandler.processEvent(InjectionEvent.NAMENODE_VERIFY_CHECKPOINTER, remoteAddress);
  LOG.info("Verify: received request from: " + remoteAddress);
  if (remoteAddress == null) {
    LOG.info("Verify: Remote address is NULL");
    throw new IOException("Verify: Remote address is null");
  }
  if (configuredRemoteAddress == null || configuredRemoteAddress.equals(new InetSocketAddress("0.0.0.0", 0).getAddress())) {
    LOG.info("Verify: Skipping check since the configured address is: " + configuredRemoteAddress);
    return;
  }
  if (!remoteAddress.equals(configuredRemoteAddress)) {
    String msg = "Verify: Configured standby is :" + configuredRemoteAddress + ", not allowing: " + remoteAddress + " to register";
    LOG.warn(msg);
    throw new IOException(msg);
  }
}
Checks if the ip of the caller is equal to the given configured address .
32,721
// Verifies a raided file stripe by stripe. One offset per stripe
// (stripeLength blocks apart) is collected, then each is checked either
// against the source alone (sourceOnly) or against source plus parity.
// Returns false as soon as any stripe fails verification.
public static boolean checkFile(Configuration conf, FileSystem srcFs, FileSystem parityFs, Path srcPath, Path parityPath, Codec codec, Progressable reporter, boolean sourceOnly) throws IOException, InterruptedException {
  FileStatus stat = srcFs.getFileStatus(srcPath);
  long blockSize = stat.getBlockSize();
  long len = stat.getLen();
  List<Long> offsets = new ArrayList<Long>();
  // i * blockSize is promoted to long, so no int overflow here.
  for (int i = 0; i * blockSize < len; i += codec.stripeLength) {
    offsets.add(i * blockSize);
  }
  for (long blockOffset : offsets) {
    if (sourceOnly) {
      if (!verifySourceFile(conf, srcFs, stat, codec, blockOffset, reporter)) {
        return false;
      }
    } else {
      if (!verifyFile(conf, srcFs, parityFs, stat, parityPath, codec, blockOffset, reporter)) {
        return false;
      }
    }
  }
  return true;
}
Check a file .
32,722
// Verifies one block of a raided file: reads up to three sample windows
// (block start, block end, and a random offset in between), CRCs the raw
// source bytes, and compares against the bytes reconstructed through the
// decoder. A CRC mismatch logs a failure metric and returns false.
// NOTE(review): is.read(buffer)/stream.read(buffer) may return fewer than
// buffer.length bytes; the code assumes a full read — confirm the streams
// guarantee that here.
private static boolean verifyFile(Configuration conf, FileSystem srcFs, FileSystem parityFs, FileStatus stat, Path parityPath, Codec codec, long blockOffset, Progressable reporter) throws IOException, InterruptedException {
  Path srcPath = stat.getPath();
  LOG.info("Verify file: " + srcPath + " at offset: " + blockOffset);
  int limit = (int) Math.min(stat.getBlockSize(), DEFAULT_VERIFY_LEN);
  if (reporter == null) {
    reporter = RaidUtils.NULL_PROGRESSABLE;
  }
  Decoder decoder = new Decoder(conf, codec);
  if (codec.isDirRaid) {
    decoder.connectToStore(srcPath);
  }
  List<Long> errorOffsets = new ArrayList<Long>();
  errorOffsets.add(blockOffset);
  long left = Math.min(stat.getBlockSize(), stat.getLen() - blockOffset);
  if (left > limit) {
    // Also sample the tail of the block and a random interior offset.
    errorOffsets.add(blockOffset + left - limit);
    errorOffsets.add(blockOffset + rand.nextInt((int) (left - limit)));
  }
  byte[] buffer = new byte[limit];
  FSDataInputStream is = srcFs.open(srcPath);
  try {
    for (long errorOffset : errorOffsets) {
      is.seek(errorOffset);
      is.read(buffer);
      CRC32 oldCrc = new CRC32();
      oldCrc.update(buffer);
      CRC32 newCrc = new CRC32();
      // Qualified inner-class allocation: the stream reconstructs the data
      // through this decoder instance.
      DecoderInputStream stream = decoder.new DecoderInputStream(RaidUtils.NULL_PROGRESSABLE, limit, stat.getBlockSize(), errorOffset, srcFs, srcPath, parityFs, parityPath, null, null, false);
      try {
        stream.read(buffer);
        newCrc.update(buffer);
        if (oldCrc.getValue() != newCrc.getValue()) {
          LogUtils.logFileCheckMetrics(LOGRESULTS.FAILURE, codec, srcPath, srcFs, errorOffset, limit, null, reporter);
          LOG.error("mismatch crc, old " + oldCrc.getValue() + ", new " + newCrc.getValue() + ", for file: " + srcPath + " at offset " + errorOffset + ", read limit " + limit);
          return false;
        }
      } finally {
        reporter.progress();
        if (stream != null) {
          stream.close();
        }
      }
    }
    return true;
  } finally {
    is.close();
  }
}
Verify the certain offset of a file .
32,723
// Recursively collects all files under path (filtered by inputFilter) into
// result; directories are descended into, files are appended.
protected void addLocatedInputPathRecursively(List<LocatedFileStatus> result, FileSystem fs, Path path, PathFilter inputFilter) throws IOException {
  for (RemoteIterator<LocatedFileStatus> itor = fs.listLocatedStatus(path, inputFilter); itor.hasNext();) {
    LocatedFileStatus stat = itor.next();
    if (stat.isDir()) {
      addLocatedInputPathRecursively(result, fs, stat.getPath(), inputFilter);
    } else {
      result.add(stat);
    }
  }
}
Add files in the input path recursively into the results .
32,724
// Splits a comma-separated list of path strings into individual paths,
// treating commas that appear inside '{...}' glob groups as part of the
// pattern rather than as separators.
private static String[] getPathStrings(String commaSeparatedPaths) {
  List<String> parts = new ArrayList<String>();
  int depth = 0;          // nesting level of open '{' groups
  int segmentStart = 0;   // start index of the current path segment
  boolean inGlob = false; // true while inside any '{...}' group
  int length = commaSeparatedPaths.length();
  for (int i = 0; i < length; i++) {
    char c = commaSeparatedPaths.charAt(i);
    if (c == '{') {
      depth++;
      inGlob = true;
    } else if (c == '}') {
      depth--;
      if (depth == 0 && inGlob) {
        inGlob = false;
      }
    } else if (c == ',' && !inGlob) {
      parts.add(commaSeparatedPaths.substring(segmentStart, i));
      segmentStart = i + 1;
    }
  }
  // The final segment runs to the end of the string.
  parts.add(commaSeparatedPaths.substring(segmentStart, length));
  return parts.toArray(new String[0]);
}
Splits the given comma-separated path list into individual path strings, ignoring commas that occur inside {...} glob patterns.
32,725
// Identifies the hosts that contribute the most bytes to a split starting
// at offset with length splitSize. Fast path: if the first block covers the
// whole split, its hosts are returned directly. Otherwise the per-block
// byte contribution of every host (and its rack) is accumulated and the
// top contributors are selected, with rack locality weighted on par with
// host locality.
protected String[] getSplitHosts(BlockLocation[] blkLocations, long offset, long splitSize, NetworkTopology clusterMap) throws IOException {
  int startIndex = getBlockIndex(blkLocations, offset);
  long bytesInThisBlock = blkLocations[startIndex].getOffset() + blkLocations[startIndex].getLength() - offset;
  if (bytesInThisBlock >= splitSize) {
    // Entire split fits in one block — its hosts are the answer.
    return blkLocations[startIndex].getHosts();
  }
  long bytesInFirstBlock = bytesInThisBlock;
  int index = startIndex + 1;
  splitSize -= bytesInThisBlock;
  // Walk forward to find the last block the split touches.
  while (splitSize > 0) {
    bytesInThisBlock = Math.min(splitSize, blkLocations[index++].getLength());
    splitSize -= bytesInThisBlock;
  }
  long bytesInLastBlock = bytesInThisBlock;
  int endIndex = index - 1;
  // Identity maps: Node instances from the topology are unique objects.
  Map<Node, NodeInfo> hostsMap = new IdentityHashMap<Node, NodeInfo>();
  Map<Node, NodeInfo> racksMap = new IdentityHashMap<Node, NodeInfo>();
  String[] allTopos = new String[0];
  for (index = startIndex; index <= endIndex; index++) {
    // First and last blocks contribute only their overlapping portions.
    if (index == startIndex) {
      bytesInThisBlock = bytesInFirstBlock;
    } else if (index == endIndex) {
      bytesInThisBlock = bytesInLastBlock;
    } else {
      bytesInThisBlock = blkLocations[index].getLength();
    }
    allTopos = blkLocations[index].getTopologyPaths();
    if (allTopos.length == 0) {
      // No topology info recorded — synthesize default-rack paths.
      allTopos = fakeRacks(blkLocations, index);
    }
    for (String topo : allTopos) {
      Node node, parentNode;
      NodeInfo nodeInfo, parentNodeInfo;
      node = clusterMap.getNode(topo);
      if (node == null) {
        node = new NodeBase(topo);
        clusterMap.add(node);
      }
      nodeInfo = hostsMap.get(node);
      if (nodeInfo == null) {
        nodeInfo = new NodeInfo(node);
        hostsMap.put(node, nodeInfo);
        parentNode = node.getParent();
        parentNodeInfo = racksMap.get(parentNode);
        if (parentNodeInfo == null) {
          parentNodeInfo = new NodeInfo(parentNode);
          racksMap.put(parentNode, parentNodeInfo);
        }
        parentNodeInfo.addLeaf(nodeInfo);
      } else {
        nodeInfo = hostsMap.get(node);
        parentNode = node.getParent();
        parentNodeInfo = racksMap.get(parentNode);
      }
      // Credit this block's bytes to both the host and its rack.
      nodeInfo.addValue(index, bytesInThisBlock);
      parentNodeInfo.addValue(index, bytesInThisBlock);
    }
  }
  return identifyHosts(allTopos.length, racksMap);
}
This function identifies and returns the hosts that contribute most for a given split . For calculating the contribution rack locality is treated on par with host locality so hosts from racks that contribute the most are preferred over hosts on racks that contribute less
32,726
// Enqueues a KillJobAction for every known tracker so each worker tears
// down any state it holds for the job.
@SuppressWarnings("deprecation")
public void killJob(JobID jobId, Map<String, InetAddress> allTrackers) {
  for (Map.Entry<String, InetAddress> entry : allTrackers.entrySet()) {
    String trackerName = entry.getKey();
    InetAddress addr = entry.getValue();
    String description = "KillJobAction " + jobId;
    ActionToSend action = new ActionToSend(trackerName, addr, new KillJobAction(jobId), description);
    allWorkQueues.enqueueAction(action);
    LOG.info("Queueing " + description + " to worker " + trackerName + "(" + addr.host + ":" + addr.port + ")");
  }
}
Enqueue an action to kill the job .
32,727
// Enqueues one KillTaskAction per entry for the given tracker.
public void killTasks(String trackerName, InetAddress addr, List<KillTaskAction> killActions) {
  for (KillTaskAction killAction : killActions) {
    String description = "KillTaskAction " + killAction.getTaskID();
    LOG.info("Queueing " + description + " to worker " + trackerName + "(" + addr.host + ":" + addr.port + ")");
    allWorkQueues.enqueueAction(new ActionToSend(trackerName, addr, killAction, description));
  }
}
Enqueue kill tasks actions .
32,728
// Enqueues a CommitTaskAction for the given tracker so the worker commits
// the task's output.
public void commitTask(String trackerName, InetAddress addr, CommitTaskAction action) {
  // Bug fix: the description previously read "KillTaskAction" (copy/paste
  // from killTasks), making queue and delivery logs misleading.
  String description = "CommitTaskAction " + action.getTaskID();
  LOG.info("Queueing " + description + " to worker " + trackerName + "(" + addr.host + ":" + addr.port + ")");
  allWorkQueues.enqueueAction(new ActionToSend(trackerName, addr, action, description));
}
Enqueue a commit task action .
32,729
// Enqueues a LaunchTaskAction (bundled with the corona session info the
// worker needs to report back) for the given tracker.
public void launchTask(Task task, String trackerName, InetAddress addr) {
  CoronaSessionInfo info = new CoronaSessionInfo(coronaJT.getSessionId(), coronaJT.getJobTrackerAddress(), coronaJT.getSecondaryTrackerAddress());
  LaunchTaskAction action = new LaunchTaskAction(task, info);
  String description = "LaunchTaskAction " + action.getTask().getTaskID();
  ActionToSend actionToSend = new ActionToSend(trackerName, addr, action, description);
  LOG.info("Queueing " + description + " to worker " + trackerName + "(" + addr.host + ":" + addr.port + ")");
  allWorkQueues.enqueueAction(actionToSend);
}
Enqueue a launch task action .
32,730
// Invoked by the ConnectionManager (with the connection lock held) when the
// connection state changes. On CONNECTED, resubscribes all watches and
// reports whether that succeeded; on DISCONNECTED_VISIBLE, invalidates all
// watched-event txn ids (-1) and notifies the watcher; DISCONNECTED_HIDDEN
// is only logged. Returns true unless resubscription failed.
boolean connectionStateChanged(int newState) {
  switch (newState) {
    case ConnectionManager.CONNECTED:
      LOG.info(listeningPort + ": Switched to CONNECTED state.");
      try {
        return resubscribe();
      } catch (Exception e) {
        LOG.error(listeningPort + ": Resubscribing failed", e);
        return false;
      }
    case ConnectionManager.DISCONNECTED_VISIBLE:
      LOG.info(listeningPort + ": Switched to DISCONNECTED_VISIBLE state");
      // Replacing values for existing keys does not structurally modify the
      // map, so iterating keySet() while putting is safe here.
      for (NamespaceEventKey eventKey : watchedEvents.keySet())
        watchedEvents.put(eventKey, -1L);
      watcher.connectionFailed();
      break;
    case ConnectionManager.DISCONNECTED_HIDDEN:
      LOG.info(listeningPort + ": Switched to DISCONNECTED_HIDDEN state.");
  }
  return true;
}
Called by the ConnectionManager when the connection state changes. The connection lock is held when calling this method, so no other methods from the ConnectionManager should be called here.
32,731
// Removes a previously placed watch of the given type from the given path.
// Throws WatchNotPlacedException when no such watch is registered locally.
// Server-side unsubscribe failures (stale client id, transport errors)
// fail the connection but the local watch entry is removed regardless.
public void removeWatch(String path, EventType watchType) throws NotConnectedToServerException, InterruptedException, WatchNotPlacedException {
  NamespaceEvent event = new NamespaceEvent(path, watchType.getByteValue());
  NamespaceEventKey eventKey = new NamespaceEventKey(path, watchType);
  Object connectionLock = connectionManager.getConnectionLock();
  ServerHandler.Client server;
  LOG.info(listeningPort + ": removeWatch: Removing watch from " + NotifierUtils.asString(eventKey) + " ...");
  if (!watchedEvents.containsKey(eventKey)) {
    LOG.warn(listeningPort + ": removeWatch: watch doesen't exist at " + NotifierUtils.asString(eventKey) + " ...");
    throw new WatchNotPlacedException();
  }
  synchronized (connectionLock) {
    connectionManager.waitForTransparentConnect();
    server = connectionManager.getServer();
    try {
      server.unsubscribe(connectionManager.getId(), event);
    } catch (InvalidClientIdException e1) {
      // The server no longer knows this client; force a reconnect.
      LOG.warn(listeningPort + ": removeWatch: server deleted us", e1);
      connectionManager.failConnection(true);
    } catch (ClientNotSubscribedException e2) {
      LOG.error(listeningPort + ": removeWatch: event not subscribed", e2);
    } catch (TException e3) {
      LOG.error(listeningPort + ": removeWatch: failed communicating to" + " server", e3);
      connectionManager.failConnection(true);
    }
    watchedEvents.remove(eventKey);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(listeningPort + ": Unsubscribed from " + NotifierUtils.asString(eventKey));
  }
}
Removes a previously placed watch for a particular event type from the given path. If no such watch is present, a WatchNotPlacedException is thrown.
32,732
// Reports whether a watch of the given type is currently placed at path.
public boolean haveWatch(String path, EventType watchType) {
  return watchedEvents.containsKey(new NamespaceEventKey(path, watchType));
}
Tests if a watch is placed at the given path and of the given type .
32,733
// Re-subscribes every watched event after a reconnect, resuming each from
// its last-seen transaction id. Must be called with the connection lock
// held. Returns false on the first subscription that fails.
private boolean resubscribe() throws TransactionIdTooOldException, InterruptedException {
  for (NamespaceEventKey eventKey : watchedEvents.keySet()) {
    NamespaceEvent event = eventKey.getEvent();
    if (!subscribe(event.getPath(), EventType.fromByteValue(event.getType()), watchedEvents.get(eventKey))) {
      return false;
    }
  }
  return true;
}
Called right after a reconnect to resubscribe to all events . Must be called with the connection lock acquired .
32,734
// Blocks until the connection state becomes CONNECTED. Must be called with
// the connection lock (the monitor wait()ed on here) held. A
// DISCONNECTED_VISIBLE state — before or during the wait — raises
// NotConnectedToServerException; DISCONNECTED_HIDDEN keeps waiting.
void waitForTransparentConnect() throws InterruptedException, NotConnectedToServerException {
  if (state == DISCONNECTED_VISIBLE) {
    LOG.warn(listeningPort + ": waitForTransparentConnect: got visible" + " disconnected state");
    throw new NotConnectedToServerException();
  }
  // Loop guards against spurious wakeups; the switch re-checks state after
  // each wakeup (the CONNECTED break exits the switch, then the while
  // condition terminates the loop).
  while (state != CONNECTED) {
    connectionLock.wait();
    switch (state) {
      case CONNECTED:
        break;
      case DISCONNECTED_HIDDEN:
        continue;
      case DISCONNECTED_VISIBLE:
        LOG.warn(listeningPort + ": waitForTransparentConnect: got visible" + " disconnected state");
        throw new NotConnectedToServerException();
    }
  }
}
Must be called holding the connection lock returned by getConnectionLock . It waits until the current connection state is CONNECTED . If it ever gets to DISCONNECTED_VISIBLE it will raise an exception . If the current state is CONNECTED then it will return without waiting .
32,735
// Checks whether the server supports the named protocol method by
// comparing method fingerprints. A null serverMethods set means the server
// predates fingerprinting, in which case support is assumed.
public boolean isMethodSupported(String methodName, Class<?>... parameterTypes) throws IOException {
  if (serverMethods == null) {
    return true;
  }
  Method method;
  try {
    method = protocol.getDeclaredMethod(methodName, parameterTypes);
  } catch (SecurityException e) {
    throw new IOException(e);
  } catch (NoSuchMethodException e) {
    throw new IOException(e);
  }
  return serverMethods.contains(Integer.valueOf(ProtocolSignature.getFingerprint(method)));
}
Check if a method is supported by the server or not
32,736
// Samples the input splits to build a partition file for terasort: reads
// roughly sampleSize keys from up to 10 evenly spaced splits, then writes
// N-1 cut keys (for N reduce partitions) to partFile as a SequenceFile of
// (Text key, NullWritable).
public static void writePartitionFile(JobConf conf, Path partFile) throws IOException {
  TeraInputFormat inFormat = new TeraInputFormat();
  TextSampler sampler = new TextSampler();
  Text key = new Text();
  Text value = new Text();
  int partitions = conf.getNumReduceTasks();
  long sampleSize = conf.getLong(SAMPLE_SIZE, 100000);
  InputSplit[] splits = inFormat.getSplits(conf, conf.getNumMapTasks());
  // NOTE(review): an empty splits array would make samples == 0 and divide
  // by zero below; callers are assumed to provide non-empty input.
  int samples = Math.min(10, splits.length);
  long recordsPerSample = sampleSize / samples;
  int sampleStep = splits.length / samples;
  long records = 0;
  for (int i = 0; i < samples; ++i) {
    RecordReader<Text, Text> reader = inFormat.getRecordReader(splits[sampleStep * i], conf, null);
    try {
      while (reader.next(key, value)) {
        sampler.addKey(key);
        records += 1;
        // Stop once this sample's share of the total budget is consumed.
        if ((i + 1) * recordsPerSample <= records) {
          break;
        }
      }
    } finally {
      // Bug fix: the reader was never closed, leaking one open input
      // stream per sampled split.
      reader.close();
    }
  }
  FileSystem outFs = partFile.getFileSystem(conf);
  if (outFs.exists(partFile)) {
    outFs.delete(partFile, false);
  }
  SequenceFile.Writer writer = SequenceFile.createWriter(outFs, conf, partFile, Text.class, NullWritable.class);
  try {
    NullWritable nullValue = NullWritable.get();
    for (Text split : sampler.createPartitions(partitions)) {
      writer.append(split, nullValue);
    }
  } finally {
    // Ensure the partition file is closed even if an append fails.
    writer.close();
  }
}
Use the input splits to take samples of the input and generate sample keys . By default reads 100 000 keys from 10 locations in the input sorts them and picks N - 1 keys to generate N equally sized partitions .
32,737
// Injects the configuration into theObject when it is Configurable, and —
// when supportJobConf is set — also applies JobConf-specific wiring.
// A null conf is a no-op.
public static void setConf(Object theObject, Configuration conf, boolean supportJobConf) {
  if (conf != null) {
    if (theObject instanceof Configurable) {
      ((Configurable) theObject).setConf(conf);
    }
    if (supportJobConf) {
      setJobConf(theObject, conf);
    }
  }
}
Check and set configuration if necessary .
32,738
// Instantiates theClass by invoking the (possibly non-public) constructor
// whose formal parameter types are parameterTypes, passing initargs.
// Throws IllegalArgumentException when the argument count differs from the
// parameter count or an argument is not an instance of its declared type
// (including nulls); wraps any reflective failure in RuntimeException.
public static <T> T newInstance(Class<T> theClass, Class<?>[] parameterTypes, Object[] initargs) {
  if (parameterTypes.length != initargs.length) {
    throw new IllegalArgumentException("Constructor parameter types don't match constructor arguments");
  }
  // Validate every argument against its declared type up front so callers
  // get a precise message instead of an opaque reflective failure.
  for (int i = 0; i < parameterTypes.length; i++) {
    Class<?> expected = parameterTypes[i];
    if (!expected.isInstance(initargs[i])) {
      throw new IllegalArgumentException("Object : " + initargs[i] + " is not an instance of " + expected);
    }
  }
  try {
    Constructor<T> ctor = theClass.getDeclaredConstructor(parameterTypes);
    ctor.setAccessible(true);
    return ctor.newInstance(initargs);
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Create an object for the given class .
32,739
// Logs all thread stacks at INFO level, rate-limited so dumps happen at
// most once per minInterval SECONDS (the field previousLogTime is guarded
// by the class monitor).
public static void logThreadInfo(Log log, String title, long minInterval) {
  boolean dumpStack = false;
  if (log.isInfoEnabled()) {
    synchronized (ReflectionUtils.class) {
      long now = System.currentTimeMillis();
      // minInterval is in seconds; previousLogTime in milliseconds.
      if (now - previousLogTime >= minInterval * 1000) {
        previousLogTime = now;
        dumpStack = true;
      }
    }
    if (dumpStack) {
      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
      printThreadInfo(new PrintWriter(buffer), title);
      log.info(buffer.toString());
    }
  }
}
Log the current thread stacks at INFO level .
32,740
// Moves dest aside as a timestamped backup ("<dest><pathSeparator><date>"),
// after pruning old backups and enforcing the retention limit. Retries the
// rename up to 3 times, sleeping 1s between attempts; gives up with an
// IOException.
// NOTE(review): File.pathSeparator (':' or ';') is deliberately used as
// the name/date delimiter — getBackups() parses it back the same way.
// NOTE(review): the fs parameter is unused here — confirm whether callers
// rely on this signature.
public static void backupFiles(FileSystem fs, File dest, Configuration conf) throws IOException {
  cleanUpAndCheckBackup(conf, dest);
  int MAX_ATTEMPT = 3;
  for (int i = 0; i < MAX_ATTEMPT; i++) {
    try {
      String mdate = dateForm.get().format(new Date(System.currentTimeMillis()));
      if (dest.exists()) {
        File tmp = new File(dest + File.pathSeparator + mdate);
        FLOG.info("Moving aside " + dest + " as " + tmp);
        if (!dest.renameTo(tmp)) {
          throw new IOException("Unable to rename " + dest + " to " + tmp);
        }
        FLOG.info("Moved aside " + dest + " as " + tmp);
      }
      return;
    } catch (IOException e) {
      FLOG.error("Creating backup exception. Will retry ", e);
      try {
        Thread.sleep(1000);
      } catch (InterruptedException iex) {
        throw new IOException(iex);
      }
    }
  }
  throw new IOException("Cannot create backup for: " + dest);
}
Backup given directory . Enforce that max number of backups has not been reached .
32,741
/**
 * Apply the backup retention policy for {@code origin} and verify the
 * backup count limit. Reads the copies/days limits from the configuration,
 * deletes expired backups, then re-lists and throws if the number of
 * remaining backups still reaches the configured maximum. When both limits
 * are 0 the check is disabled entirely.
 *
 * @throws IOException if the maximum number of backups is exceeded
 */
static void cleanUpAndCheckBackup ( Configuration conf , File origin ) throws IOException { String [ ] backups = getBackups ( origin ) ; File root = origin . getParentFile ( ) ; int copiesToKeep = conf . getInt ( NN_IMAGE_COPIES_TOKEEP , NN_IMAGE_COPIES_TOKEEP_DEFAULT ) ; int daysToKeep = conf . getInt ( NN_IMAGE_DAYS_TOKEEP , NN_IMAGE_DAYS_TOKEEP_DEFAULT ) ; if ( copiesToKeep == 0 && daysToKeep == 0 ) { return ; } deleteOldBackups ( root , backups , daysToKeep , copiesToKeep ) ; backups = getBackups ( origin ) ; if ( backups . length >= copiesToKeep ) { throw new IOException ( "Exceeded maximum number of standby backups of " + origin + " under " + origin . getParentFile ( ) + " max: " + copiesToKeep ) ; } }
Check if we have not exceeded the maximum number of backups .
32,742
/**
 * Delete backups according to the retention policy. Backups are supplied
 * oldest-first; at most {@code backups.length - copiesToKeep + 1} of the
 * oldest are considered. A backup is deleted when it is older than
 * {@code daysToKeep} days, or unconditionally when {@code daysToKeep == 0}
 * (pure copy-count retention). Iteration stops at the first retained backup.
 *
 * Fixes over the previous version: the milliseconds-per-day product used
 * int arithmetic and overflowed for daysToKeep >= 25; an unparseable backup
 * name left {@code backupDate} null and caused an NPE (now skipped).
 *
 * @param root parent directory containing the backups
 * @param backups backup directory names sorted oldest to newest
 * @param daysToKeep age limit in days, 0 means no age limit
 * @param copiesToKeep maximum number of backup copies to keep
 */
static void deleteOldBackups(File root, String[] backups, int daysToKeep,
    int copiesToKeep) {
  Date now = new Date(System.currentTimeMillis());
  int maxIndex = Math.max(0, backups.length - copiesToKeep + 1);
  for (int i = 0; i < maxIndex; i++) {
    String backup = backups[i];
    Date backupDate = null;
    try {
      backupDate = dateForm.get().parse(
          backup.substring(backup.indexOf(File.pathSeparator) + 1));
    } catch (ParseException pex) {
      // Name does not match the backup pattern; ignored, handled below.
    }
    if (backupDate == null) {
      // Previously this fell through and threw NPE on getTime().
      continue;
    }
    long backupAge = now.getTime() - backupDate.getTime();
    // 24L forces long arithmetic: the former int product overflowed
    // for daysToKeep >= 25.
    boolean deleteOldBackup =
        (daysToKeep > 0 && backupAge > daysToKeep * 24L * 60 * 60 * 1000);
    boolean deleteExtraBackup = (daysToKeep == 0);
    if (deleteOldBackup || deleteExtraBackup) {
      try {
        FLOG.info("Deleting backup " + new File(root, backup));
        FileUtil.fullyDelete(new File(root, backup));
        FLOG.info("Deleted backup " + new File(root, backup));
      } catch (IOException iex) {
        FLOG.error("Error deleting backup " + new File(root, backup), iex);
      }
    } else {
      // Backups are sorted oldest-first; the rest are newer and kept.
      break;
    }
  }
}
Delete backups according to the retention policy .
32,743
/**
 * List the names of all backups of {@code origin}: entries of the parent
 * directory named "<origin><separator><timestamp>" whose timestamp parses
 * with the backup date format. The result is sorted from oldest to newest;
 * an empty array is returned when the parent directory cannot be listed.
 */
static String[] getBackups(File origin) {
  final String originName = origin.getName();
  final String prefix = originName + File.pathSeparator;
  FilenameFilter backupFilter = new FilenameFilter() {
    public boolean accept(File dir, String name) {
      if (!name.startsWith(prefix) || name.equals(originName)) {
        return false;
      }
      try {
        dateForm.get().parse(name.substring(name.indexOf(File.pathSeparator) + 1));
        return true;
      } catch (ParseException pex) {
        // Not a timestamped backup of origin.
        return false;
      }
    }
  };
  String[] backups = origin.getParentFile().list(backupFilter);
  if (backups == null) {
    return new String[0];
  }
  Comparator<String> oldestFirst = new Comparator<String>() {
    public int compare(String back1, String back2) {
      try {
        Date date1 = dateForm.get().parse(
            back1.substring(back1.indexOf(File.pathSeparator) + 1));
        Date date2 = dateForm.get().parse(
            back2.substring(back2.indexOf(File.pathSeparator) + 1));
        // Date.compareTo returns -1/0/1, so this is identical to the old
        // "-1 * date2.compareTo(date1)".
        return date1.compareTo(date2);
      } catch (ParseException pex) {
        return 0;
      }
    }
  };
  Arrays.sort(backups, oldestFirst);
  return backups;
}
List all directories that match the backup pattern . Sort from oldest to newest .
32,744
/**
 * Import the namespace from the configured checkpoint directories and save
 * it into the current storage directories: load the checkpoint through a
 * temporary FSImage, adopt its storage info and last written transaction
 * id, then save the namespace into the real image's directories.
 *
 * Fixes over the previous version: the error message for missing checkpoint
 * edits directories was a copy-paste of the image-directory message; it now
 * names the edits-dir key (this codebase reads "fs.checkpoint.dir" for the
 * image dirs, see getCheckpointDirs).
 *
 * @throws IOException if checkpoint dirs are unconfigured or loading fails
 */
void doImportCheckpoint() throws IOException {
  Collection<URI> checkpointDirs =
      NNStorageConfiguration.getCheckpointDirs(conf, null);
  Collection<URI> checkpointEditsDirs =
      NNStorageConfiguration.getCheckpointEditsDirs(conf, null);
  if (checkpointDirs == null || checkpointDirs.isEmpty()) {
    throw new IOException("Cannot import image from a checkpoint. "
        + "\"fs.checkpoint.dir\" is not set.");
  }
  if (checkpointEditsDirs == null || checkpointEditsDirs.isEmpty()) {
    throw new IOException("Cannot import image from a checkpoint. "
        + "\"fs.checkpoint.edits.dir\" is not set.");
  }
  FSImage realImage = namesystem.getFSImage();
  assert realImage == this;
  // Load the checkpoint via a temporary image pointed at the checkpoint dirs.
  FSImage ckptImage = new FSImage(conf, checkpointDirs, checkpointEditsDirs, null);
  ckptImage.setFSNamesystem(namesystem);
  namesystem.dir.fsImage = ckptImage;
  try {
    ckptImage.recoverTransitionRead(StartupOption.REGULAR);
  } finally {
    ckptImage.close();
  }
  // Adopt the loaded storage info and continue transaction ids after it.
  realImage.storage.setStorageInfo(ckptImage.storage);
  realImage.getEditLog().setLastWrittenTxId(
      ckptImage.getEditLog().getLastWrittenTxId() + 1);
  namesystem.dir.fsImage = realImage;
  saveNamespace();
}
Load image from a checkpoint directory and save it into the current one .
32,745
/**
 * Load the namespace from the given image file, verifying its contents
 * against the MD5 digest stored in the associated ".md5" file.
 *
 * @param iis stream over the image contents
 * @param imageFile the image file on disk (used to locate the .md5 file)
 * @throws IOException if no stored MD5 exists or loading/verification fails
 */
protected void loadFSImage(ImageInputStream iis, File imageFile)
    throws IOException {
  final MD5Hash expectedMD5 = MD5FileUtils.readStoredMd5ForFile(imageFile);
  if (expectedMD5 == null) {
    throw new IOException(
        "No MD5 file found corresponding to image file " + imageFile);
  }
  // The digest is checked by the underlying loader as the stream is read.
  iis.setImageDigest(expectedMD5);
  loadFSImage(iis);
}
Load the image namespace from the given image file verifying it against the MD5 sum stored in its associated . md5 file .
32,746
/**
 * Return the string representation of the parent of the given path, i.e.
 * everything before the last path separator.
 *
 * NOTE(review): for a path directly under the root (e.g. "/foo") this
 * returns the empty string, and a path with no separator at all makes
 * lastIndexOf return -1 so substring throws
 * StringIndexOutOfBoundsException — callers appear to pass absolute paths
 * only; confirm before reusing elsewhere.
 */
String getParent ( String path ) { return path . substring ( 0 , path . lastIndexOf ( Path . SEPARATOR ) ) ; }
Return string representing the parent of the given path .
32,747
/**
 * Load the given edit log streams into the namespace image, starting from
 * the most recent checkpoint transaction id. After all streams are applied
 * the edit log's last written txid and the directory quota counts are
 * updated.
 *
 * @param editStreams streams to replay, in order
 * @return the number of edit transactions loaded
 * @throws IOException if replaying any stream fails
 */
protected long loadEdits ( Iterable < EditLogInputStream > editStreams ) throws IOException { long lastAppliedTxId = storage . getMostRecentCheckpointTxId ( ) ; int numLoaded = 0 ; FSEditLogLoader loader = new FSEditLogLoader ( namesystem ) ; for ( EditLogInputStream editIn : editStreams ) { FLOG . info ( "Load Image: Reading edits: " + editIn + " last applied txid#: " + lastAppliedTxId ) ; numLoaded += loader . loadFSEdits ( editIn , lastAppliedTxId ) ; lastAppliedTxId = loader . getLastAppliedTxId ( ) ; } editLog . setLastWrittenTxId ( lastAppliedTxId ) ; FLOG . info ( "Load Image: Number of edit transactions loaded: " + numLoaded + " last applied txid: " + lastAppliedTxId ) ; namesystem . dir . updateCountForINodeWithQuota ( ) ; return numLoaded ; }
Load the specified list of edit files into the image .
32,748
/**
 * Save the contents of the FS image through the given image manager:
 * obtain the checkpoint output stream for the context's txid, write the
 * (optionally compressed) image, then record the saved digest with the
 * storage for later verification.
 *
 * @param context save context carrying the checkpoint transaction id
 * @param im destination image manager
 * @param forceUncompressed write uncompressed regardless of configuration
 * @throws IOException if writing the image fails
 */
void saveFSImage ( SaveNamespaceContext context , ImageManager im , boolean forceUncompressed ) throws IOException { long txid = context . getTxId ( ) ; OutputStream os = im . getCheckpointOutputStream ( txid ) ; FSImageFormat . Saver saver = new FSImageFormat . Saver ( context ) ; FSImageCompression compression = FSImageCompression . createCompression ( conf , forceUncompressed ) ; saver . save ( os , compression , null , im . toString ( ) ) ; InjectionHandler . processEvent ( InjectionEvent . FSIMAGE_SAVED_IMAGE , txid ) ; storage . setCheckpointImageDigest ( txid , saver . getSavedDigest ( ) ) ; }
Save the contents of the FS image to the file .
32,749
/**
 * Save the contents of the FS image to a new image file in each of the
 * current storage directories. If the edit log is open, the current log
 * segment is ended first and a new segment is started (and the txid file
 * written) afterwards, even if the save fails.
 *
 * NOTE(review): FSIMAGE_STARTING_SAVE_NAMESPACE is fired twice, before and
 * after attemptRestoreRemovedStorage — looks like a duplicated injection
 * point; confirm whether tests rely on both events.
 *
 * @param forUncompressed save the image uncompressed when true
 * @throws IOException if the edit log is uninitialized or the save fails
 */
public synchronized void saveNamespace ( boolean forUncompressed ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . FSIMAGE_STARTING_SAVE_NAMESPACE ) ; if ( editLog == null ) { throw new IOException ( "editLog must be initialized" ) ; } storage . attemptRestoreRemovedStorage ( ) ; InjectionHandler . processEvent ( InjectionEvent . FSIMAGE_STARTING_SAVE_NAMESPACE ) ; boolean editLogWasOpen = editLog . isOpen ( ) ; if ( editLogWasOpen ) { editLog . endCurrentLogSegment ( true ) ; } long imageTxId = editLog . getLastWrittenTxId ( ) ; try { InjectionHandler . processEvent ( InjectionEvent . FSIMAGE_CREATING_SAVER_THREADS ) ; saveFSImageInAllDirs ( imageTxId , forUncompressed ) ; storage . writeAll ( ) ; } finally { if ( editLogWasOpen ) { editLog . startLogSegment ( imageTxId + 1 , true ) ; storage . writeTransactionIdFileToStorage ( imageTxId + 1 , this ) ; } saveNamespaceContext . clear ( ) ; } }
Save the contents of the FS image to a new image file in each of the current storage directories .
32,750
/**
 * Check whether the storage directories and non-file journals exist and
 * confirm formatting them. In interactive mode the user is prompted per
 * directory to allow formatting anyway; otherwise formatting proceeds only
 * when {@code force} is set.
 *
 * @return true if formatting may proceed
 */
boolean confirmFormat(boolean force, boolean interactive) throws IOException {
  final List<FormatConfirmable> confirmables = Lists.newArrayList();
  // Every storage directory plus every non-file journal must be confirmed.
  for (StorageDirectory sd : storage.dirIterable(null)) {
    confirmables.add(sd);
  }
  confirmables.addAll(editLog.getFormatConfirmables());
  return Storage.confirmFormat(confirmables, force, interactive);
}
Check whether the storage directories and non - file journals exist . If running in interactive mode will prompt the user for each directory to allow them to format anyway . Otherwise returns false unless force is specified .
32,751
/**
 * End a checkpoint: validate the signature against the local storage info,
 * rename the downloaded checkpoint image into place, and record the elapsed
 * time in metrics when metrics are available.
 *
 * @throws IOException if validation or the image rename fails
 */
void rollFSImage(CheckpointSignature sig) throws IOException {
  final long startNanos = System.nanoTime();
  sig.validateStorageInfo(this.storage);
  saveDigestAndRenameCheckpointImage(sig.mostRecentCheckpointTxId,
      sig.imageDigest);
  final long elapsedMicros = DFSUtil.getElapsedTimeMicroSeconds(startNanos);
  if (metrics != null) {
    metrics.rollFsImageTime.inc(elapsedMicros);
  }
}
End checkpoint . Validate the current storage info with the given signature .
32,752
/**
 * Called by the secondary NameNode after downloading an image and by the
 * NameNode after receiving a new image from the secondary: verifies the
 * digest against the one recorded at checkpoint time, renames the image
 * from fsimage_N.ckpt to fsimage_N, saves the related .md5 file into
 * place, and records N as the most recent checkpoint transaction id.
 *
 * Fixes over the previous version: the corruption message ran the word
 * "of" into the digest value (missing space).
 *
 * @throws IOException if the digests do not match (corrupt image)
 */
synchronized void saveDigestAndRenameCheckpointImage(long txid, MD5Hash digest)
    throws IOException {
  if (!digest.equals(storage.getCheckpointImageDigest(txid))) {
    throw new IOException(
        "Checkpoint image is corrupt: expecting an MD5 checksum of "
        + digest + " but is " + storage.getCheckpointImageDigest(txid));
  }
  imageSet.saveDigestAndRenameCheckpointImage(txid, digest);
  storage.setMostRecentCheckpointTxId(txid);
}
This is called by the 2NN after having downloaded an image and by the NN after having received a new image from the 2NN . It renames the image from fsimage_N . ckpt to fsimage_N and also saves the related . md5 file into place .
32,753
/**
 * Retrieve the checkpoint directories from the configuration key
 * "fs.checkpoint.dir", falling back to {@code defaultName} when none are
 * configured and a default is supplied.
 *
 * @return the checkpoint directories as files (possibly empty)
 */
static Collection<File> getCheckpointDirs(Configuration conf,
    String defaultName) {
  Collection<String> dirNames = conf.getStringCollection("fs.checkpoint.dir");
  if (dirNames.isEmpty() && defaultName != null) {
    dirNames.add(defaultName);
  }
  Collection<File> dirs = new ArrayList<File>(dirNames.size());
  for (String dirName : dirNames) {
    dirs.add(new File(dirName));
  }
  return dirs;
}
Retrieve checkpoint dirs from configuration .
32,754
/**
 * Log which jobs are being handed to the initializer threads, one line per
 * job with its id, user and queue.
 */
private void printJobs(ArrayList<JobInProgress> jobsToInitialize) {
  for (JobInProgress jip : jobsToInitialize) {
    LOG.info("Passing to Initializer Job Id :" + jip.getJobID()
        + " User: " + jip.getProfile().getUser()
        + " Queue : " + jip.getProfile().getQueueName());
  }
}
Method used to print log statements about which jobs are being passed to init - threads .
32,755
/**
 * Assign each queue to a worker (initializer) thread. The queues are split
 * evenly over {@code poolSize} threads; any remainder queues are then
 * distributed round-robin onto the threads already assigned to the first
 * queues. The number of threads is expected to be at most the number of
 * queues.
 *
 * NOTE(review): if poolSize > number of queues, numberOfQueuesPerThread is
 * 0, the map stays empty, and the remainder loop NPEs on
 * threadsToQueueMap.get(...) — presumably guarded by configuration
 * elsewhere; confirm before relying on larger pool sizes.
 */
private void assignThreadsToQueues ( ) { int countOfQueues = jobQueues . size ( ) ; String [ ] queues = ( String [ ] ) jobQueues . keySet ( ) . toArray ( new String [ countOfQueues ] ) ; int numberOfQueuesPerThread = countOfQueues / poolSize ; int numberOfQueuesAssigned = 0 ; for ( int i = 0 ; i < poolSize ; i ++ ) { JobInitializationThread initializer = createJobInitializationThread ( ) ; int batch = ( i * numberOfQueuesPerThread ) ; for ( int j = batch ; j < ( batch + numberOfQueuesPerThread ) ; j ++ ) { initializer . addQueue ( queues [ j ] ) ; threadsToQueueMap . put ( queues [ j ] , initializer ) ; numberOfQueuesAssigned ++ ; } } if ( numberOfQueuesAssigned < countOfQueues ) { int startIndex = 0 ; for ( int i = numberOfQueuesAssigned ; i < countOfQueues ; i ++ ) { JobInitializationThread t = threadsToQueueMap . get ( queues [ startIndex ] ) ; t . addQueue ( queues [ i ] ) ; threadsToQueueMap . put ( queues [ i ] , t ) ; startIndex ++ ; } } }
Method which is used by the poller to assign appropriate worker thread to a queue . The number of threads would be always less than or equal to number of queues in a system . If number of threads is configured to be more than number of queues then poller does not create threads more than number of queues .
32,756
/**
 * Clean up the initialized-jobs map used to decide whether a job has been
 * initialized: running jobs that already have tasks scheduled are removed
 * from the map and from the waiting queue; completed (or killed) jobs are
 * removed from the map.
 */
void cleanUpInitializedJobsList ( ) { Iterator < Entry < JobID , JobInProgress > > jobsIterator = initializedJobs . entrySet ( ) . iterator ( ) ; while ( jobsIterator . hasNext ( ) ) { Entry < JobID , JobInProgress > entry = jobsIterator . next ( ) ; JobInProgress job = entry . getValue ( ) ; if ( job . getStatus ( ) . getRunState ( ) == JobStatus . RUNNING ) { if ( isScheduled ( job ) ) { LOG . info ( "Removing scheduled jobs from waiting queue" + job . getJobID ( ) ) ; jobsIterator . remove ( ) ; jobQueueManager . removeJobFromWaitingQueue ( job ) ; continue ; } } if ( job . isComplete ( ) ) { LOG . info ( "Removing killed/completed job from initalized jobs " + "list : " + job . getJobID ( ) ) ; jobsIterator . remove ( ) ; } } }
Method which is used internally to clean up the initialized jobs data structure which the job initialization poller uses to check if a job is initialized or not .
32,757
/**
 * Convenience check whether some of the job's tasks have already been
 * scheduled, i.e. fewer map or reduce tasks are pending than were
 * requested.
 */
private boolean isScheduled(JobInProgress job) {
  boolean mapsStarted = job.pendingMaps() < job.desiredMaps();
  boolean reducesStarted = job.pendingReduces() < job.desiredReduces();
  return mapsStarted || reducesStarted;
}
Convenience method to check if job has been scheduled or not .
32,758
/**
 * Read and validate the file header: the format version followed by the
 * number of buckets. Resets the per-bucket read state so reading starts
 * before the first bucket.
 *
 * @throws IOException if the stored version is not supported
 */
void readHeader ( ) throws IOException { int version = in . readInt ( ) ; if ( version != BlockCrcInfoWritable . LATEST_BLOCK_CRC_FILE_VERSION ) { throw new IOException ( "Version " + version + " is not supported." ) ; } numBuckets = in . readInt ( ) ; currentBucket = - 1 ; numRecordsReadInBucket = 0 ; numRecordsInBucket = 0 ; }
Read header of the file
32,759
/**
 * Find the bucket ID for the next record. If the current bucket is not yet
 * exhausted, the current bucket ID is returned; otherwise bucket headers
 * are read from the input until the next non-empty bucket is found.
 *
 * @return the bucket ID of the next record, or -1 when no buckets remain
 * @throws IOException on read failure
 */
int moveToNextRecordAndGetItsBucketId ( ) throws IOException { while ( numRecordsReadInBucket >= numRecordsInBucket ) { if ( currentBucket + 1 >= numBuckets ) { return - 1 ; } else { numRecordsInBucket = in . readInt ( ) ; currentBucket ++ ; numRecordsReadInBucket = 0 ; } } return currentBucket ; }
Find the bucket ID for the next record . If the current bucket hasn't been finished yet, the current bucket ID is returned . Otherwise it keeps reading the input file until it finds the next non - empty bucket and returns that bucket's ID .
32,760
/**
 * Return the next blockCRC record, or null when no records are left.
 *
 * @throws IOException on read failure
 */
BlockCrcInfoWritable getNextRecord() throws IOException {
  if (moveToNextRecordAndGetItsBucketId() < 0) {
    // -1 means every bucket has been exhausted.
    return null;
  }
  final BlockCrcInfoWritable crcInfo = new BlockCrcInfoWritable();
  crcInfo.readFields(in);
  numRecordsReadInBucket++;
  return crcInfo;
}
Get information for the next blockCRC record, or null if no more records are left .
32,761
/**
 * Start one job over the currently pending files, if any. Updates the
 * last-checking timestamp, builds a timestamped job name, and — while
 * holding the jobFiles lock — starts the job unless the pending-job limit
 * has been reached, then records the submitted file count and clears the
 * file set.
 *
 * @return the started Job, or null when there are no files or the number
 *         of pending jobs has reached {@code maxPendingJobs}
 */
public static Job startOneJob ( Worker newWorker , Priority pri , Set < String > jobFiles , long detectTime , AtomicLong numFilesSubmitted , AtomicLong lastCheckingTime , long maxPendingJobs ) throws IOException , InterruptedException , ClassNotFoundException { if ( lastCheckingTime != null ) { lastCheckingTime . set ( System . currentTimeMillis ( ) ) ; } String startTimeStr = dateFormat . format ( new Date ( ) ) ; String jobName = newWorker . JOB_NAME_PREFIX + "." + newWorker . jobCounter + "." + pri + "-pri" + "." + startTimeStr ; Job job = null ; synchronized ( jobFiles ) { if ( jobFiles . size ( ) == 0 ) { return null ; } newWorker . jobCounter ++ ; synchronized ( newWorker . jobIndex ) { if ( newWorker . jobIndex . size ( ) >= maxPendingJobs ) { return null ; } job = newWorker . startJob ( jobName , jobFiles , pri , detectTime ) ; } numFilesSubmitted . addAndGet ( jobFiles . size ( ) ) ; jobFiles . clear ( ) ; } return job ; }
Start one job over the currently pending files and return it, or null if no job could be started .
32,762
/**
 * Compute, per stripe of the source file, the number of lost blocks.
 * Returns an empty map when the file is not raided.
 */
private Map<Integer, Integer> getLostStripes(
    Configuration conf, FileStatus stat, FileSystem fs) throws IOException {
  Map<Integer, Integer> lostStripes = new HashMap<Integer, Integer>();
  RaidInfo raidInfo = RaidUtils.getFileRaidInfo(stat, conf);
  Codec codec = raidInfo.codec;
  if (codec == null) {
    // Not a raided file: nothing to report.
    return lostStripes;
  }
  DistributedFileSystem dfs = (DistributedFileSystem) fs;
  if (codec.isDirRaid) {
    RaidUtils.collectDirectoryCorruptBlocksInStripe(conf, dfs, raidInfo,
        stat, lostStripes);
  } else {
    RaidUtils.collectFileCorruptBlocksInStripe(dfs, raidInfo, stat,
        lostStripes);
  }
  return lostStripes;
}
Get the lost blocks numbers per stripe in the source file .
32,763
/**
 * Get a map from lost-file name to the number of lost blocks, as reported
 * by the name node via DFSck. The first line of the fsck output is skipped
 * (header) before matching begins, and trash paths are filtered out of the
 * result.
 *
 * Fixes over the previous version: the reader over the fsck output was
 * never closed (resource leak); it is now closed in a finally block.
 *
 * @param pattern pattern whose first group captures the file name
 * @param dfsckArgs arguments passed through to DFSck
 * @throws IOException if running fsck or reading its output fails
 */
protected Map<String, Integer> getLostFiles(Pattern pattern,
    String[] dfsckArgs) throws IOException {
  Map<String, Integer> lostFiles = new HashMap<String, Integer>();
  BufferedReader reader = getLostFileReader(dfsckArgs);
  try {
    // Skip the fsck header line (intentional in the original code).
    String line = reader.readLine();
    while ((line = reader.readLine()) != null) {
      Matcher m = pattern.matcher(line);
      if (!m.find()) {
        continue;
      }
      String fileName = m.group(1).trim();
      Integer numLost = lostFiles.get(fileName);
      numLost = numLost == null ? 0 : numLost;
      numLost += 1;
      lostFiles.put(fileName, numLost);
    }
  } finally {
    // Release the stream behind the fsck output.
    reader.close();
  }
  LOG.info("FSCK returned " + lostFiles.size() + " files with args "
      + Arrays.toString(dfsckArgs));
  RaidUtils.filterTrash(getConf(), lostFiles.keySet().iterator());
  LOG.info("getLostFiles returning " + lostFiles.size() + " files with args "
      + Arrays.toString(dfsckArgs));
  return lostFiles;
}
Gets a list of lost files from the name node via DFSck
32,764
/**
 * Get the status of the entire block integrity monitor by aggregating the
 * statuses of its two components: the corruption worker and the
 * decommissioning worker. Job lists, file-name lists and per-priority file
 * counts from each non-null component status are merged into one Status.
 */
public BlockIntegrityMonitor . Status getAggregateStatus ( ) { Status fixer = corruptionWorker . getStatus ( ) ; Status copier = decommissioningWorker . getStatus ( ) ; List < JobStatus > jobs = new ArrayList < JobStatus > ( ) ; List < JobStatus > simFailedJobs = new ArrayList < JobStatus > ( ) ; List < JobStatus > failedJobs = new ArrayList < JobStatus > ( ) ; List < String > highPriFileNames = new ArrayList < String > ( ) ; int numHighPriFiles = 0 ; int numLowPriFiles = 0 ; int numLowestPriFiles = 0 ; if ( fixer != null ) { jobs . addAll ( fixer . jobs ) ; simFailedJobs . addAll ( fixer . simFailJobs ) ; failedJobs . addAll ( fixer . failJobs ) ; if ( fixer . highPriorityFileNames != null ) { highPriFileNames . addAll ( fixer . highPriorityFileNames ) ; } numHighPriFiles += fixer . highPriorityFiles ; numLowPriFiles += fixer . lowPriorityFiles ; numLowestPriFiles += fixer . lowestPriorityFiles ; } if ( copier != null ) { jobs . addAll ( copier . jobs ) ; simFailedJobs . addAll ( copier . simFailJobs ) ; failedJobs . addAll ( copier . failJobs ) ; if ( copier . highPriorityFileNames != null ) { highPriFileNames . addAll ( copier . highPriorityFileNames ) ; } numHighPriFiles += copier . highPriorityFiles ; numLowPriFiles += copier . lowPriorityFiles ; numLowestPriFiles += copier . lowestPriorityFiles ; } return new Status ( numHighPriFiles , numLowPriFiles , numLowestPriFiles , jobs , highPriFileNames , failedJobs , simFailedJobs ) ; }
Get the status of the entire block integrity monitor . The status returned represents the aggregation of the statuses of all the integrity monitor s components .
32,765
/**
 * Template method that invokes {@link #call()} and retries indefinitely.
 * On connection failure it falls back to a new job tracker before
 * retrying; when the fallback itself fails, the original connect error is
 * rethrown. Other IOExceptions are delegated to handleIOException.
 */
public final T makeCall() throws IOException {
  for (;;) {
    try {
      return call();
    } catch (ConnectException e) {
      try {
        // Tracker is gone; locate its replacement before retrying.
        reconnectToNewJobTracker(0);
      } catch (IOException f) {
        LOG.error("Fallback process failed with ", f);
        throw e;
      }
    } catch (IOException e) {
      handleIOException(e);
    }
  }
}
Template function to make the call . Throws if it cannot fall back .
32,766
/**
 * Reconnect to the new job tracker address obtained from the secondary
 * tracker via InterCoronaJobTrackerProtocol: shut down the current client,
 * poll the secondary for the replacement address (with bounded retries via
 * predRetry/waitRetry), then connect to it. On connect failure the whole
 * procedure recurses with an incremented attempt count, giving up after
 * CONNECT_MAX_NUMBER attempts.
 *
 * @param connectNum number of connect attempts made so far
 * @throws IOException when attempts are exhausted, no secondary address is
 *         configured, or no new tracker address can be obtained
 */
private final void reconnectToNewJobTracker ( int connectNum ) throws IOException { if ( connectNum >= CONNECT_MAX_NUMBER ) { LOG . error ( "reconnectToNewJobTracker has reached its max number." ) ; throw new IOException ( "reconnectToNewJobTracker has reached its max number." ) ; } InetSocketAddress secondaryTracker = getSecondaryTracker ( ) ; JobConf conf = getConf ( ) ; InetSocketAddress oldAddress = getCurrentClientAddress ( ) ; LOG . info ( "Falling back from " + oldAddress + " to secondary tracker at " + secondaryTracker + " with " + connectNum + " try" ) ; if ( secondaryTracker == null ) throw new IOException ( "Secondary address not provided." ) ; shutdown ( ) ; InterCoronaJobTrackerProtocol secondaryClient = RPC . waitForProxy ( InterCoronaJobTrackerProtocol . class , InterCoronaJobTrackerProtocol . versionID , secondaryTracker , conf , SECONDARY_TRACKER_CONNECT_TIMEOUT ) ; InetSocketAddressWritable oldAddrWritable = new InetSocketAddressWritable ( oldAddress ) ; InetSocketAddressWritable newAddress = null ; int retryNum = 0 ; do { newAddress = secondaryClient . getNewJobTrackerAddress ( oldAddrWritable ) ; try { waitRetry ( ) ; } catch ( InterruptedException e ) { LOG . error ( "Fallback interrupted, taking next retry." ) ; } ++ retryNum ; } while ( newAddress == null && predRetry ( retryNum ) ) ; if ( newAddress == null || newAddress . getAddress ( ) == null ) throw new IOException ( "Failed to obtain new job tracker address." ) ; RPC . stopProxy ( secondaryClient ) ; try { connect ( newAddress . getAddress ( ) ) ; LOG . info ( "Fallback process successful: " + newAddress . getAddress ( ) ) ; } catch ( IOException e ) { LOG . error ( "Fallback connect to " + newAddress . getAddress ( ) + " failed for " , e ) ; reconnectToNewJobTracker ( ++ connectNum ) ; } }
Reconnects to new address obtained from secondary address via InterCoronaTrackerProtocol
32,767
/**
 * Instantiate the ResourceCalculatorPlugin of the given class, configured
 * from {@code conf}. When no class is supplied, pick a plugin for this
 * platform (currently only Linux is detected); if detection is not
 * possible or not permitted, a no-op plugin is returned.
 */
public static ResourceCalculatorPlugin getResourceCalculatorPlugin(
    Class<? extends ResourceCalculatorPlugin> clazz, Configuration conf) {
  if (clazz != null) {
    return ReflectionUtils.newInstance(clazz, conf);
  }
  try {
    if (System.getProperty("os.name").startsWith("Linux")) {
      return new LinuxResourceCalculatorPlugin();
    }
  } catch (SecurityException se) {
    // Not allowed to read system properties; fall through to the null plugin.
  }
  return new NullResourceCalculatorPlugin();
}
Get the ResourceCalculatorPlugin from the class name and configure it . If class name is null this method will try and return a memory calculator plugin available for this system .
32,768
/**
 * Perform a batch of reads, one per stream, on the read pool and wait for
 * all of them to finish. A slot semaphore bounds concurrency: one permit
 * is taken per submitted read (each ReadOperation releases its permit),
 * and completion is detected by re-acquiring all numThreads permits. The
 * 10-second tryAcquire timeouts exist only to keep reporting progress
 * while waiting. Total wall time is accumulated into readTime.
 *
 * @throws InterruptedException if waiting for a slot is interrupted
 */
private void performReads ( ReadResult readResult ) throws InterruptedException { long start = System . currentTimeMillis ( ) ; for ( int i = 0 ; i < streams . length ; ) { boolean acquired = slots . tryAcquire ( 1 , 10 , TimeUnit . SECONDS ) ; reporter . progress ( ) ; if ( acquired ) { readPool . execute ( new ReadOperation ( readResult , i ) ) ; i ++ ; } } while ( true ) { boolean acquired = slots . tryAcquire ( numThreads , 10 , TimeUnit . SECONDS ) ; reporter . progress ( ) ; if ( acquired ) { slots . release ( numThreads ) ; break ; } } readTime += ( System . currentTimeMillis ( ) - start ) ; }
Performs a batch of reads from the given streams and waits for the reads to finish .
32,769
/**
 * Log, at INFO level, the counts and names of over-utilized and
 * under-utilized datanodes.
 */
private void logImbalancedNodes() {
  if (!LOG.isInfoEnabled()) {
    return;
  }
  int overUtilized = 0;
  int underUtilized = 0;
  StringBuilder overNames = new StringBuilder();
  StringBuilder underNames = new StringBuilder();
  // Single pass; classification priority for the counts is kept as before
  // (under-utilized wins), while the name lists use independent checks.
  for (BalancerDatanode node : this.datanodes.values()) {
    if (isUnderUtilized(node)) {
      underUtilized++;
    } else if (isOverUtilized(node)) {
      overUtilized++;
    }
    if (isOverUtilized(node)) {
      overNames.append(" ").append(node.getName());
    }
    if (isUnderUtilized(node)) {
      underNames.append(" ").append(node.getName());
    }
  }
  LOG.info(overUtilized + " over utilized nodes:" + overNames);
  LOG.info(underUtilized + " under utilized nodes: " + underNames);
}
Log the over utilized & under utilized nodes
32,770
/**
 * Log the predicted outcome of the balancing plan: total bytes left to
 * move, total bytes scheduled to move, and each node's projected remaining
 * space after plan execution.
 */
private void logPlanOutcome() {
  if (!LOG.isInfoEnabled()) {
    return;
  }
  LOG.info("Predicted plan outcome: bytesLeftToMove: " + bytesLeftToMove
      + ", bytesToMove: " + bytesToMove);
  for (BalancerDatanode node : this.datanodes.values()) {
    LOG.info(node.getName() + " remaining: " + node.getCurrentRemaining());
  }
}
Log node utilization after the plan execution
32,771
/**
 * Record a scheduled transfer of {@code size} bytes from {@code source} to
 * {@code target} in the balancing plan, registering the task with both
 * endpoints and remembering the nodes involved.
 */
private void scheduleTask(Source source, long size, Target target) {
  NodeTask task = new NodeTask(target, size);
  // Both endpoints track the same task object.
  source.addNodeTask(task);
  target.addNodeTask(task);
  sources.add(source);
  targets.add(target);
  LOG.info("scheduled " + size + " bytes : " + source.getName() + " -> "
      + target.getName());
}
Pairs up given nodes in balancing plan
32,772
/**
 * Log a data distribution report based on the NameNode's datanode report:
 * the average remaining ratio, then each node's remaining ratio and raw
 * remaining/capacity bytes.
 */
public static void logDataDistribution(DatanodeInfo[] report) {
  if (!LOG.isInfoEnabled()) {
    return;
  }
  double avgRemaining = computeAvgRemaining(Arrays.asList(report));
  StringBuilder msg = new StringBuilder(
      "Data distribution report: avgRemaining " + avgRemaining);
  for (DatanodeInfo node : report) {
    msg.append("\n").append(node.getName())
        .append(" remaining ").append(getRemaining(node))
        .append(" raw ").append(node.getRemaining())
        .append(" / ").append(node.getCapacity());
  }
  LOG.info(msg);
}
Prints data distribution based on report from NameNode
32,773
/**
 * Set the job's jar file by locating the jar that contains the given
 * class; a no-op when the class was not loaded from a jar.
 */
public void setJarByClass(Class cls) {
  String containingJar = findContainingJar(cls);
  if (containingJar != null) {
    setJar(containingJar);
  }
}
Set the job s jar file by finding an example class location .
32,774
/**
 * Delete every configured mapred local directory on the local file system.
 *
 * @deprecated Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
 * @throws IOException if a directory cannot be deleted
 */
public void deleteLocalFiles ( ) throws IOException { String [ ] localDirs = getLocalDirs ( ) ; for ( int i = 0 ; i < localDirs . length ; i ++ ) { FileSystem . getLocal ( this ) . delete ( new Path ( localDirs [ i ] ) ) ; } }
Use MRAsyncDiskService . moveAndDeleteAllVolumes instead .
32,775
/**
 * Get the current working directory for the default file system, caching
 * the value in "mapred.working.dir" on first access.
 *
 * @throws RuntimeException wrapping any IOException from the file system
 */
public Path getWorkingDirectory() {
  String cached = get("mapred.working.dir");
  if (cached != null) {
    return new Path(cached);
  }
  try {
    Path dir = FileSystem.get(this).getWorkingDirectory();
    set("mapred.working.dir", dir.toString());
    return dir;
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}
Get the current working directory for the default file system .
32,776
/**
 * Get the memory, in MB, required to run a map task of this job. The
 * deprecated memory setting takes precedence when present; otherwise the
 * per-map property is read and normalized.
 */
public long getMemoryForMapTask() {
  long memory = getDeprecatedMemoryValue();
  if (memory != DISABLED_MEMORY_LIMIT) {
    return memory;
  }
  return normalizeMemoryConfigValue(getLong(
      JobConf.MAPRED_JOB_MAP_MEMORY_MB_PROPERTY, DISABLED_MEMORY_LIMIT));
}
Get memory required to run a map task of the job in MB .
32,777
/**
 * Get the memory, in MB, required to run a reduce task of this job. The
 * deprecated memory setting takes precedence when present; otherwise the
 * per-reduce property is read and normalized.
 */
public long getMemoryForReduceTask() {
  long memory = getDeprecatedMemoryValue();
  if (memory != DISABLED_MEMORY_LIMIT) {
    return memory;
  }
  return normalizeMemoryConfigValue(getLong(
      JobConf.MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY, DISABLED_MEMORY_LIMIT));
}
Get memory required to run a reduce task of the job in MB .
32,778
/**
 * Compute the number of slots a single map task-attempt of this job needs:
 * its memory requirement divided by the slot size, rounded up; 1 when
 * either limit is disabled.
 */
int computeNumSlotsPerMap(long slotSizePerMap) {
  long taskMemory = getMemoryForMapTask();
  if (slotSizePerMap == DISABLED_MEMORY_LIMIT
      || taskMemory == DISABLED_MEMORY_LIMIT) {
    return 1;
  }
  // float division kept for exact behavioral parity with callers.
  return (int) Math.ceil((float) taskMemory / (float) slotSizePerMap);
}
Compute the number of slots required to run a single map task - attempt of this job .
32,779
/**
 * Compute the number of slots a single reduce task-attempt of this job
 * needs: its memory requirement divided by the slot size, rounded up; 1
 * when either limit is disabled.
 */
int computeNumSlotsPerReduce(long slotSizePerReduce) {
  long taskMemory = getMemoryForReduceTask();
  if (slotSizePerReduce == DISABLED_MEMORY_LIMIT
      || taskMemory == DISABLED_MEMORY_LIMIT) {
    return 1;
  }
  // float division kept for exact behavioral parity with callers.
  return (int) Math.ceil((float) taskMemory / (float) slotSizePerReduce);
}
Compute the number of slots required to run a single reduce task - attempt of this job .
32,780
/**
 * Find a jar that contains a class of the same name, if any: resolve the
 * class resource through its class loader, and for a "jar:" URL return the
 * decoded file path with the "!..." entry suffix stripped. Returns a jar
 * even if it is not the first classpath entry defining that name.
 *
 * NOTE(review): a class loaded by the bootstrap loader makes
 * getClassLoader() return null and this NPEs — callers appear to pass
 * user/application classes only; confirm before reusing elsewhere.
 *
 * @return path of the containing jar, or null when not found
 */
private static String findContainingJar ( Class my_class ) { ClassLoader loader = my_class . getClassLoader ( ) ; String class_file = my_class . getName ( ) . replaceAll ( "\\." , "/" ) + ".class" ; try { for ( Enumeration itr = loader . getResources ( class_file ) ; itr . hasMoreElements ( ) ; ) { URL url = ( URL ) itr . nextElement ( ) ; if ( "jar" . equals ( url . getProtocol ( ) ) ) { String toReturn = url . getPath ( ) ; if ( toReturn . startsWith ( "file:" ) ) { toReturn = toReturn . substring ( "file:" . length ( ) ) ; } toReturn = URLDecoder . decode ( toReturn , "UTF-8" ) ; return toReturn . replaceAll ( "!.*$" , "" ) ; } } } catch ( IOException e ) { throw new RuntimeException ( e ) ; } return null ; }
Find a jar that contains a class of the same name if any . It will return a jar file even if that is not the first thing on the class path that has a class with the same name .
32,781
/**
 * Replace selected job tracker configuration keys with their
 * instance-specific variants ("<key>-<instance>"), allowing two sets of
 * configuration to be switched from the command line. Keys without an
 * instance-specific value are logged and left unchanged.
 */
public static void overrideConfiguration(JobConf conf, int instance) {
  final String[] CONFIG_KEYS = new String[] {
      "mapred.job.tracker",
      "mapred.local.dir",
      "mapred.fairscheduler.server.address" };
  for (String configKey : CONFIG_KEYS) {
    String instanceKey = configKey + "-" + instance;
    String value = conf.get(instanceKey);
    if (value == null) {
      LOG.warn("Configuration " + instanceKey + " not found.");
    } else {
      conf.set(configKey, value);
    }
  }
}
Replace the jobtracker configuration with the configuration of instance 0 or 1 . This allows switching between two sets of configurations via the command line options .
32,782
/**
 * Construct the exception message for a failed property change; old and
 * new values are each quoted when present.
 *
 * Fixes over the previous version: the quotes were unbalanced whenever
 * only one of the two values was non-null ("from 'old" with no closing
 * quote, or a stray leading quote before "to").
 *
 * @param property the property that could not be changed
 * @param newVal requested new value, may be null
 * @param oldVal current value, may be null
 */
private static String constructMessage(String property, String newVal,
    String oldVal) {
  String message = "Could not change property " + property;
  if (oldVal != null) {
    message += " from \'" + oldVal + "\'";
  }
  if (newVal != null) {
    message += " to \'" + newVal + "\'";
  }
  return message;
}
Construct the exception message .
32,783
/**
 * Analyze the given data directories and recover from previous transitions
 * if required: missing directories are dropped, unformatted (and empty)
 * ones are formatted, and other states are recovered via doRecover. The
 * surviving directories become this storage's directories; the version
 * transition is then applied and all VERSION files rewritten.
 *
 * @throws IOException if a directory is not empty but unformatted, a
 *         recovery step fails, or no usable directory remains
 */
void recoverTransitionRead ( DataNode datanode , NamespaceInfo nsInfo , Collection < File > dataDirs , StartupOption startOpt ) throws IOException { assert FSConstants . LAYOUT_VERSION == nsInfo . getLayoutVersion ( ) : "Block-pool and name-node layout versions must be the same." ; this . storageDirs = new ArrayList < StorageDirectory > ( dataDirs . size ( ) ) ; ArrayList < StorageState > dataDirStates = new ArrayList < StorageState > ( dataDirs . size ( ) ) ; for ( Iterator < File > it = dataDirs . iterator ( ) ; it . hasNext ( ) ; ) { File dataDir = it . next ( ) ; StorageDirectory sd = new StorageDirectory ( dataDir , null , false ) ; StorageState curState ; try { curState = sd . analyzeStorage ( startOpt ) ; switch ( curState ) { case NORMAL : break ; case NON_EXISTENT : LOG . info ( "Storage directory " + dataDir + " does not exist." ) ; it . remove ( ) ; continue ; case NOT_FORMATTED : LOG . info ( "Storage directory " + dataDir + " is not formatted." ) ; if ( ! sd . isEmpty ( ) ) { LOG . error ( "Storage directory " + dataDir + " is not empty, and will not be formatted! Exiting." ) ; throw new IOException ( "Storage directory " + dataDir + " is not empty!" ) ; } LOG . info ( "Formatting ..." ) ; format ( sd , nsInfo ) ; break ; default : sd . doRecover ( curState ) ; } } catch ( IOException ioe ) { sd . unlock ( ) ; throw ioe ; } addStorageDir ( sd ) ; dataDirStates . add ( curState ) ; } if ( dataDirs . size ( ) == 0 ) throw new IOException ( "All specified directories are not accessible or do not exist." ) ; doTransition ( datanode , nsInfo , startOpt ) ; this . writeAll ( ) ; }
Analyze storage directories . Recover from previous transitions if required .
32,784
/**
 * Write this storage's fields (namespaceID, cTime, layoutVersion) into the
 * properties destined for the given directory's VERSION file.
 */
protected void setFields ( Properties props , StorageDirectory sd ) throws IOException { props . setProperty ( NAMESPACE_ID , String . valueOf ( namespaceID ) ) ; props . setProperty ( CHECK_TIME , String . valueOf ( cTime ) ) ; props . setProperty ( LAYOUT_VERSION , String . valueOf ( layoutVersion ) ) ; }
Set layoutVersion , namespaceID and cTime into the namespace storage VERSION file properties
32,785
/**
 * Validate the namespace ID read from the VERSION file and record it.
 *
 * Fixes over the previous version: corrected the "Unexepcted" typo in the
 * mismatch message.
 *
 * @param storage root of the storage directory being inspected
 * @param nsid namespace id string from the VERSION file
 * @throws InconsistentFSStateException if the id is missing or conflicts
 *         with a previously recorded namespace id
 */
private void setNameSpaceID(File storage, String nsid)
    throws InconsistentFSStateException {
  if (nsid == null || nsid.equals("")) {
    throw new InconsistentFSStateException(storage,
        "file " + STORAGE_FILE_VERSION + " is invalid.");
  }
  int newNsId = Integer.parseInt(nsid);
  if (namespaceID > 0 && namespaceID != newNsId) {
    throw new InconsistentFSStateException(storage,
        "Unexpected namespaceID " + nsid + " . Expected " + namespaceID);
  }
  namespaceID = newNsId;
}
Validate and set namespace ID
32,786
/**
 * Upgrades all given storage directories in parallel. Each UpgradeThread
 * moves the current storage into a backup directory and hard-links its
 * blocks into the new current directory. Afterwards the new storage info
 * is persisted and the temporary "previous" directories are renamed into
 * place to commit the upgrade.
 *
 * @param sds storage directories to upgrade
 * @param sdsInfo per-directory storage info, parallel to {@code sds}
 * @param nsInfo namespace info supplying the post-upgrade cTime
 * @throws IOException if any upgrade thread fails or is interrupted
 */
private void doUpgrade(List<StorageDirectory> sds, List<StorageInfo> sdsInfo,
    final NamespaceInfo nsInfo) throws IOException {
  assert sds.size() == sdsInfo.size();
  // One worker thread per storage directory.
  UpgradeThread[] upgradeThreads = new UpgradeThread[sds.size()];
  for (int i = 0; i < upgradeThreads.length; i++) {
    final StorageDirectory sd = sds.get(i);
    final StorageInfo si = sdsInfo.get(i);
    UpgradeThread thread = new UpgradeThread(sd, si, nsInfo);
    thread.start();
    upgradeThreads[i] = thread;
  }
  // Wait for all workers; convert interruption to InterruptedIOException
  // while preserving the original cause.
  for (UpgradeThread thread : upgradeThreads) {
    try {
      thread.join();
    } catch (InterruptedException e) {
      throw (InterruptedIOException) new InterruptedIOException().initCause(e);
    }
  }
  // Fail the whole upgrade if any single directory failed.
  for (UpgradeThread thread : upgradeThreads) {
    if (thread.error != null)
      throw new IOException(thread.error);
  }
  this.layoutVersion = FSConstants.LAYOUT_VERSION;
  // NOTE(review): the assertion message talks about layout versions but
  // the condition checks the namespace ID — confirm the intended text.
  assert this.namespaceID == nsInfo.getNamespaceID() :
    "Data-node and name-node layout versions must be the same.";
  this.cTime = nsInfo.getCTime();
  // Commit phase: persist VERSION, then rename previous.tmp -> previous.
  for (StorageDirectory sd : sds) {
    sd.write();
    File prevDir = sd.getPreviousDir();
    File tmpDir = sd.getPreviousTmp();
    rename(tmpDir, prevDir);
    LOG.info("Upgrade of " + sd.getRoot() + " is complete.");
  }
}
Move current storage into a backup directory and hardlink all its blocks into the new current directory .
32,787
/**
 * Deserializes this object from the stream, verifying that the
 * serialized format version matches the one this class expects.
 *
 * @param in source stream
 * @throws IOException on read failure
 * @throws VersionMismatchException if the stored version differs
 */
public void readFields(DataInput in) throws IOException {
  final byte serializedVersion = in.readByte();
  if (serializedVersion != getVersion()) {
    throw new VersionMismatchException(getVersion(), serializedVersion);
  }
}
javadoc from Writable
32,788
/**
 * Starting from the tasks that use the highest amount of memory, selects
 * tasks to fail until the released memory meets the requirement, then
 * kills the selected tasks.
 *
 * @param memoryToRelease the amount of memory that must be freed
 */
private void failTasksWithMaxMemory(long memoryToRelease) {
  List<TaskAttemptID> allTasks = new ArrayList<TaskAttemptID>();
  allTasks.addAll(processTreeInfoMap.keySet());
  // Sort tasks by memory used, descending. The original comparator never
  // returned 0 for equal keys, violating the Comparator contract (TimSort
  // can throw IllegalArgumentException); this version is contract-safe.
  Collections.sort(allTasks, new Comparator<TaskAttemptID>() {
    public int compare(TaskAttemptID tid1, TaskAttemptID tid2) {
      long m1 = processTreeInfoMap.get(tid1).getMemoryUsed();
      long m2 = processTreeInfoMap.get(tid2).getMemoryUsed();
      return m2 > m1 ? 1 : (m2 < m1 ? -1 : 0);
    }
  });
  long memoryReleased = 0;
  // Greedily take the largest killable tasks until enough memory is
  // accounted for or no candidates remain.
  while (memoryReleased < memoryToRelease && !allTasks.isEmpty()) {
    TaskAttemptID tid = allTasks.remove(0);
    if (!isKillable(tid)) {
      continue;
    }
    long memoryUsed = processTreeInfoMap.get(tid).getMemoryUsed();
    if (memoryUsed == 0) {
      // List is sorted descending, so the remaining tasks use no memory.
      break;
    }
    tasksToKill.add(tid);
    memoryReleased += memoryUsed;
  }
  if (tasksToKill.isEmpty()) {
    LOG.error("The total memory usage is over CGroup limits. "
        + "But found no alive task to kill for freeing memory.");
  } else if (memoryReleased < memoryToRelease) {
    // Typo fixed in the log message ("uanble" -> "unable").
    LOG.error("The total memory usage is over CGroup limits. "
        + "But unable to find enough tasks to kill for freeing memory.");
  }
  killTasks();
}
Starting from the tasks that use the highest amount of memory, fail tasks until the memory released meets the requirement
32,789
/**
 * Kills the given task's process group and removes its CGroup
 * process-tree bookkeeping.
 *
 * @param tid the task attempt to kill
 * @param msg diagnostic message handed to the task tracker
 * @param wasFailure whether the kill counts as a task failure
 */
private void killTask(TaskAttemptID tid, String msg, boolean wasFailure) {
  // Let the task tracker perform its own cleanup/accounting first.
  taskTracker.cleanUpOverMemoryTask(tid, wasFailure, msg);
  CGroupProcessTreeInfo treeInfo = processTreeInfoMap.get(tid);
  try {
    LinuxSystemCall.killProcessGroup(Integer.parseInt(treeInfo.getPID()));
  } catch (java.io.IOException e) {
    LOG.error("Could not kill process group " + treeInfo.getPID(), e);
  }
  processTreeInfoMap.remove(tid);
  LOG.info("Removed ProcessTree with root " + treeInfo.getPID());
}
Kill the task and clean up CGroupProcessTreeInfo
32,790
/**
 * Checks whether a task may be killed to free memory: it must still be
 * running on this tracker, not already killed, and in the RUNNING or
 * COMMIT_PENDING state.
 *
 * @param tid the task attempt to check
 * @return true if the task is a valid kill candidate
 */
private boolean isKillable(TaskAttemptID tid) {
  TaskInProgress task = taskTracker.runningTasks.get(tid);
  if (task == null || task.wasKilled()) {
    return false;
  }
  TaskStatus.State state = task.getRunState();
  return state == TaskStatus.State.RUNNING
      || state == TaskStatus.State.COMMIT_PENDING;
}
Check if a task can be killed to increase free memory
32,791
/**
 * Builds the textual utilization report for the command-line client.
 *
 * Supported commands (argv[0]):
 *   -all                    cluster, all running jobs, all task trackers
 *   -cluster                cluster utilization only
 *   -job [id ...]           listed jobs, or all running jobs if none given
 *   -tasktracker [tt ...]   listed trackers, or all trackers if none given
 *
 * @param argv command-line arguments; empty array or unknown command
 *        yields the empty string
 * @return the formatted report text
 * @throws IOException if a collector RPC fails
 */
public String getResponse(String[] argv) throws IOException {
  // StringBuilder avoids the accidental O(n^2) of repeated String +=.
  StringBuilder result = new StringBuilder();
  if (argv.length < 1) {
    return result.toString();
  }
  if (argv[0].equals("-all")) {
    result.append(rpcCollector.getClusterUtilization());
    result.append(JobUtilization.legendString)
          .append(JobUtilization.unitString);
    for (JobUtilization job : rpcCollector.getAllRunningJobUtilization()) {
      result.append(job);
    }
    result.append(TaskTrackerUtilization.legendString)
          .append(TaskTrackerUtilization.unitString);
    for (TaskTrackerUtilization tt :
         rpcCollector.getAllTaskTrackerUtilization()) {
      result.append(tt);
    }
    return result.toString();
  }
  if (argv[0].equals("-cluster")) {
    result.append(rpcCollector.getClusterUtilization());
    return result.toString();
  }
  if (argv[0].equals("-job")) {
    result.append(JobUtilization.legendString)
          .append(JobUtilization.unitString);
    if (argv.length == 1) {
      // No job ids given: report every running job.
      for (JobUtilization job : rpcCollector.getAllRunningJobUtilization()) {
        result.append(job);
      }
      return result.toString();
    }
    for (int i = 1; i < argv.length; i++) {
      result.append(rpcCollector.getJobUtilization(argv[i]));
    }
    return result.toString();
  }
  if (argv[0].equals("-tasktracker")) {
    result.append(TaskTrackerUtilization.legendString)
          .append(TaskTrackerUtilization.unitString);
    if (argv.length == 1) {
      // No tracker names given: report every task tracker.
      for (TaskTrackerUtilization tt :
           rpcCollector.getAllTaskTrackerUtilization()) {
        result.append(tt);
      }
      return result.toString();
    }
    for (int i = 1; i < argv.length; i++) {
      result.append(rpcCollector.getTaskTrackerUtilization(argv[i]));
    }
    return result.toString();
  }
  return result.toString();
}
Obtain the result to print in command line
32,792
/**
 * Splits a line into key and value at the configured field separator.
 * If the separator does not occur often enough, the whole line becomes
 * the key and the value is empty. Decoding problems are logged and
 * swallowed, leaving key/val as set so far.
 *
 * @param line raw input bytes
 * @param key receives the key portion
 * @param val receives the value portion
 * @throws IOException on I/O failure while scanning the line
 */
void splitKeyVal(byte[] line, Text key, Text val) throws IOException {
  int sepPos = UTF8ByteArrayUtils.findNthByte(line,
      (byte) this.getFieldSeparator(), this.getNumOfKeyFields());
  try {
    if (sepPos != -1) {
      UTF8ByteArrayUtils.splitKeyVal(line, key, val, sepPos);
    } else {
      // Separator not found: entire line is the key, value is empty.
      key.set(line);
      val.set("");
    }
  } catch (CharacterCodingException e) {
    LOG.warn(StringUtils.stringifyException(e));
  }
}
Split a line into key and value at the configured field separator (a tab by default).
32,793
/**
 * Writes a Writable value to the client output stream. BytesWritable and
 * Text payloads are written from their backing buffers without copying;
 * anything else is written as its UTF-8 encoded toString().
 *
 * @param value the value to emit
 * @throws IOException on write failure
 */
void write(Writable value) throws IOException {
  final byte[] data;
  final int length;
  if (value instanceof BytesWritable) {
    BytesWritable bytes = (BytesWritable) value;
    data = bytes.get();
    length = bytes.getSize();
  } else if (value instanceof Text) {
    Text text = (Text) value;
    data = text.getBytes();
    length = text.getLength();
  } else {
    // Fallback: serialize via toString() in UTF-8.
    data = value.toString().getBytes("UTF-8");
    length = data.length;
  }
  clientOut_.write(data, 0, length);
}
Write a writable value to the output stream using UTF - 8 encoding
32,794
/**
 * Removes history entries that are older than {@code historyLength} or
 * that exceed the configured size limit, then prunes the now-empty nodes
 * from the history tree and refreshes the history metrics.
 */
private void cleanUpHistory() {
  long oldestAllowedTimestamp = System.currentTimeMillis() - historyLength;
  int trashedNotifications = 0;
  if (LOG.isDebugEnabled()) {
    LOG.debug("History cleanup: Checking old notifications to remove from history list ...");
  }
  // Sentinel entry used only as a binary-search key on timestamp.
  HistoryTreeEntry key = new HistoryTreeEntry(oldestAllowedTimestamp, 0, (byte) 0);
  int notificationsCount = 0;
  historyLock.writeLock().lock();
  try {
    notificationsCount = orderedHistoryList.size();
    LOG.warn("History cleanup: size of the history before cleanup: "
        + notificationsCount);
    if (!historyLimitDisabled && notificationsCount > historyLimit) {
      LOG.warn("History cleanup: Reached physical limit. Number of stored notifications: "
          + notificationsCount + ". Clearing ...");
    }
    // Index of the first entry younger than the cutoff timestamp.
    int index = Collections.binarySearch(orderedHistoryList, key, comparatorByTS);
    int toDeleteByTS = index >= 0 ? index : -(index + 1);
    // Entries over the configured size limit (0 when the limit is off).
    int toDeleteByLimit = historyLimitDisabled ? 0
        : notificationsCount - (int) historyLimit;
    toDeleteByLimit = toDeleteByLimit > 0 ? toDeleteByLimit : 0;
    // Delete whichever criterion demands more.
    int toDelete = Math.max(toDeleteByTS, toDeleteByLimit);
    if (toDelete > 0) {
      LOG.warn("History cleanup: number of the history to cleanup: " + toDelete);
      // Unlink each entry from the tree before dropping it from the list.
      for (int i = 0; i < toDelete; i++) {
        orderedHistoryList.get(i).removeFromTree();
      }
      orderedHistoryList.subList(0, toDelete).clear();
      if (toDeleteByLimit > toDeleteByTS) {
        // NOTE(review): increments by at most 1 per run even when several
        // entries were trashed by the size limit — confirm whether the
        // metric should add the actual difference instead.
        trashedNotifications++;
      }
      notificationsCount = orderedHistoryList.size();
      LOG.warn("History cleanup: size of the history after cleanup: "
          + notificationsCount);
      // Prune now-empty nodes left behind in the history tree.
      cleanUpHistoryTree(historyTree);
    }
  } finally {
    historyLock.writeLock().unlock();
  }
  // Metrics are published outside the write lock.
  core.getMetrics().trashedHistoryNotifications.inc(trashedNotifications);
  core.getMetrics().historySize.set(notificationsCount);
  core.getMetrics().historyQueues.set(historyQueuesCount);
}
Checks if there are notifications in our tree which are older than historyLength, and removes those which are older (or which exceed the configured history size limit).
32,795
/**
 * Depth-first cleanup of the history tree rooted at {@code node}: each
 * child subtree is cleaned first, then the child itself is detached if it
 * has become removable (no children and no notifications).
 *
 * @param node subtree root; null or childless nodes are ignored
 */
private void cleanUpHistoryTree(HistoryNode node) {
  if (node == null || node.children == null) {
    return;
  }
  for (Iterator<HistoryNode> it = node.children.iterator(); it.hasNext();) {
    HistoryNode child = it.next();
    // Post-order: clean the subtree before deciding on the child itself.
    cleanUpHistoryTree(child);
    if (shouldRemoveNode(child)) {
      it.remove();
    }
  }
}
Clean up the Tree by DFS traversal .
32,796
/**
 * Decides whether a history-tree node can be removed: a node is removable
 * when it is null, or when it has no children and every one of its
 * notification lists is empty or absent.
 *
 * @param node the node to examine (may be null)
 * @return true if the node carries no data and may be detached
 */
private boolean shouldRemoveNode(HistoryNode node) {
  if (node == null) {
    return true;
  }
  if (node.children != null && node.children.size() > 0) {
    return false;
  }
  if (node.notifications != null) {
    for (List<HistoryTreeEntry> entries : node.notifications.values()) {
      if (entries != null && entries.size() > 0) {
        return false;
      }
    }
  }
  return true;
}
Should remove the node from the history tree if both the notifications and children list are empty .
32,797
/**
 * Stores a notification in the history. The timestamp attached to the
 * stored entry is generated when this method is called. The entry is
 * linked both into the per-path history tree and into the global ordered
 * list, under the write lock.
 *
 * @param notification the namespace notification to record
 */
public void storeNotification(NamespaceNotification notification) {
  int notificationsCount = 0;
  historyLock.writeLock().lock();
  try {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Storing into history: " + NotifierUtils.asString(notification));
    }
    String[] paths = DFSUtil.split(notification.path, Path.SEPARATOR_CHAR);
    long timestamp = System.currentTimeMillis();
    HistoryTreeEntry entry = new HistoryTreeEntry(timestamp,
        notification.txId, notification.type);
    // Walk (and extend) the history tree along the path components,
    // skipping empty components produced by the split.
    HistoryNode node = historyTree;
    for (String path : paths) {
      if (path.trim().length() == 0) {
        continue;
      }
      node = node.addOrGetChild(path);
    }
    // Lazily create the per-node, per-type notification buckets.
    if (node.notifications == null) {
      node.notifications = new HashMap<Byte, List<HistoryTreeEntry>>();
    }
    if (!node.notifications.containsKey(notification.type)) {
      node.notifications.put(notification.type,
          new LinkedList<HistoryTreeEntry>());
    }
    // Link the entry into both the tree node and the global ordered list.
    entry.node = node;
    node.notifications.get(notification.type).add(entry);
    orderedHistoryList.add(entry);
    notificationsCount = orderedHistoryList.size();
  } finally {
    historyLock.writeLock().unlock();
  }
  // Metrics are published outside the write lock.
  core.getMetrics().historySize.set(notificationsCount);
  core.getMetrics().historyQueues.set(historyQueuesCount);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Notification stored.");
  }
}
Called when we should store a notification in the our history . The timestamp used to store it is generated when this method is called .
32,798
/**
 * Checks what notifications are saved in history for the given event and
 * adds them to the given queue. Only notifications that happened strictly
 * after the edit-log operation with the given transaction id are queued,
 * in transaction-id order.
 *
 * @param event the watched event (path and notification type)
 * @param txId the client's last-seen transaction id; must itself exist
 *        in the stored history
 * @param notifications destination queue for the matching notifications
 * @throws TransactionIdTooOldException if history is empty, txId falls
 *         outside the stored range, or the exact (txId, type) entry is
 *         missing (treated as potential history corruption)
 */
public void addNotificationsToQueue(NamespaceEvent event, long txId,
    Queue<NamespaceNotification> notifications)
    throws TransactionIdTooOldException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Got addNotificationsToQueue for: "
        + NotifierUtils.asString(event) + " and txId: " + txId);
  }
  historyLock.readLock().lock();
  try {
    if (orderedHistoryList == null || orderedHistoryList.size() == 0) {
      throw new TransactionIdTooOldException("No data in history.");
    }
    // txId must lie within [oldest, newest] stored transaction ids.
    if (orderedHistoryList.get(0).txnId > txId
        || orderedHistoryList.get(orderedHistoryList.size() - 1).txnId < txId) {
      throw new TransactionIdTooOldException("No data in history for txId " + txId);
    }
    // Locate the exact entry for (txId, type); absence inside the stored
    // range indicates corrupt history.
    int index = Collections.binarySearch(orderedHistoryList,
        new HistoryTreeEntry(0, txId, event.type), comparatorByID);
    if (index < 0) {
      LOG.error("Potential corrupt history. Got request for: "
          + NotifierUtils.asString(event) + " and txId: " + txId);
      throw new TransactionIdTooOldException("Potentially corrupt server history");
    }
    // Normalize the watched path to directory form (trailing separator)
    // so the prefix check only matches entries inside that directory.
    String dirFormatPath = event.path;
    if (!dirFormatPath.endsWith(Path.SEPARATOR)) {
      dirFormatPath += Path.SEPARATOR;
    }
    // Replay everything strictly after the matched entry, filtered by
    // notification type and path prefix.
    for (int i = index + 1; i < orderedHistoryList.size(); i++) {
      HistoryTreeEntry entry = orderedHistoryList.get(i);
      if (event.type != entry.type) {
        continue;
      }
      String entryPath = entry.getFullPath();
      if (entryPath.startsWith(dirFormatPath)) {
        notifications.add(new NamespaceNotification(entryPath,
            entry.type, entry.txnId));
      }
    }
  } finally {
    historyLock.readLock().unlock();
  }
}
Checks what notifications are saved in history for the given event and adds those notifications in the given queue . Only the notifications which happened strictly after the edit log operations with the given transaction id are put in the queue . The notifications are put in the queue in the order of their transaction id .
32,799
/**
 * Prints the progress percentage, emitting a line only when the integer
 * percentage has advanced past the last value printed.
 *
 * @param read number of units processed so far
 * @param size total number of units; non-positive totals are ignored
 */
private void printProgress(long read, long size) {
  // Guard against division by zero when the total size is unknown/empty.
  if (size <= 0) {
    return;
  }
  int progress = Math.min(100, (int) ((100 * read) / size));
  if (progress > lastProgress) {
    lastProgress = progress;
    System.out.println("Completed " + lastProgress + " % ");
  }
}
Print the progress .