idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,600
/**
 * Reconstructs a single HAR part-file block by recovering, in sequence, each
 * parity block that falls inside the part-file block, and writes the
 * reconstructed bytes to {@code localBlockFile}.
 *
 * @param dfs            file system holding the parity/source files
 * @param partFile       path of the HAR part file containing the lost block
 * @param blockOffset    byte offset of the lost block within the part file
 * @param partFileStat   status of the part file (block size, length)
 * @param harIndex       index mapping part-file offsets to parity files
 * @param localBlockFile local file that receives the reconstructed block
 * @param progress       progress callback, invoked after each recovered block
 * @throws IOException if no index entry/codec/source file can be found, if
 *         source and parity modification times disagree, or if a recovered
 *         block spills past the part-file block boundary
 */
private void processParityHarPartBlock(FileSystem dfs, Path partFile,
    long blockOffset, FileStatus partFileStat, HarIndex harIndex,
    File localBlockFile, Progressable progress) throws IOException {
  // Bare file name of the part file; HAR index entries are keyed by it.
  String partName = partFile.toUri().getPath();
  partName = partName.substring(1 + partName.lastIndexOf(Path.SEPARATOR));
  OutputStream out = new FileOutputStream(localBlockFile);
  try {
    // The part-file block ends one block length past its start, or at the
    // end of the part file, whichever comes first.
    final long blockEnd = Math.min(blockOffset + partFileStat.getBlockSize(),
        partFileStat.getLen());
    // offset advances by one source block per recovered parity block.
    for (long offset = blockOffset; offset < blockEnd; ) {
      HarIndex.IndexEntry entry = harIndex.findEntry(partName, offset);
      if (entry == null) {
        String msg = "Lost index file has no matching index entry for " + partName + ":" + offset;
        LOG.warn(msg);
        throw new IOException(msg);
      }
      Path parityFile = new Path(entry.fileName);
      Encoder encoder = null;
      // Pick the codec whose parity-path layout matches this parity file.
      // NOTE(review): the loop does not break on a match, so the LAST
      // matching codec wins — presumably codecs are mutually exclusive;
      // confirm.
      for (Codec codec : Codec.getCodecs()) {
        if (isParityFile(parityFile, codec)) {
          encoder = new Encoder(getConf(), codec);
        }
      }
      if (encoder == null) {
        String msg = "Could not figure out codec correctly for " + parityFile;
        LOG.warn(msg);
        throw new IOException(msg);
      }
      Path srcFile = RaidUtils.sourcePathFromParityPath(parityFile, dfs);
      if (null == srcFile) {
        String msg = "Can not find the source path for parity file: " + parityFile;
        LOG.warn(msg);
        throw new IOException(msg);
      }
      FileStatus srcStat = dfs.getFileStatus(srcFile);
      // Parity is only valid for the source version it was computed from.
      if (srcStat.getModificationTime() != entry.mtime) {
        String msg = "Modification times of " + parityFile + " and " + srcFile + " do not match.";
        LOG.warn(msg);
        throw new IOException(msg);
      }
      // Translate the part-file offset into an offset within the parity file.
      long lostOffsetInParity = offset - entry.startOffset;
      LOG.info(partFile + ":" + offset + " maps to " + parityFile + ":"
          + lostOffsetInParity + " and will be recovered from " + srcFile);
      encoder.recoverParityBlockToStream(dfs, srcStat, srcStat.
          getBlockSize(), parityFile, lostOffsetInParity, out, progress);
      // One parity block per source block: advance by the source block size.
      offset += srcStat.getBlockSize();
      LOG.info("Recovered " + srcStat.getBlockSize() + " part file bytes ");
      if (offset > blockEnd) {
        String msg = "Recovered block spills across part file blocks. Cannot continue";
        throw new IOException(msg);
      }
      progress.progress();
    }
  } finally {
    out.close();
  }
}
This reconstructs a single part file block by recovering, in sequence, each parity block in the part file block.
32,601
DataInputStream computeMetadata ( Configuration conf , InputStream dataStream ) throws IOException { ByteArrayOutputStream mdOutBase = new ByteArrayOutputStream ( 1024 * 1024 ) ; DataOutputStream mdOut = new DataOutputStream ( mdOutBase ) ; mdOut . writeShort ( FSDataset . FORMAT_VERSION_NON_INLINECHECKSUM ) ; int bytesPerChecksum = conf . getInt ( "io.bytes.per.checksum" , 512 ) ; DataChecksum sum = DataChecksum . newDataChecksum ( DataChecksum . CHECKSUM_CRC32 , bytesPerChecksum ) ; sum . writeHeader ( mdOut ) ; byte [ ] buf = new byte [ bytesPerChecksum ] ; byte [ ] chk = new byte [ sum . getChecksumSize ( ) ] ; int bytesSinceFlush = 0 ; while ( true ) { int bytesRead = dataStream . read ( buf , bytesSinceFlush , bytesPerChecksum - bytesSinceFlush ) ; if ( bytesRead == - 1 ) { if ( bytesSinceFlush > 0 ) { boolean reset = true ; sum . writeValue ( chk , 0 , reset ) ; mdOut . write ( chk , 0 , chk . length ) ; bytesSinceFlush = 0 ; } break ; } sum . update ( buf , bytesSinceFlush , bytesRead ) ; bytesSinceFlush += bytesRead ; if ( bytesSinceFlush == bytesPerChecksum ) { boolean reset = true ; sum . writeValue ( chk , 0 , reset ) ; mdOut . write ( chk , 0 , chk . length ) ; bytesSinceFlush = 0 ; } } byte [ ] mdBytes = mdOutBase . toByteArray ( ) ; return new DataInputStream ( new ByteArrayInputStream ( mdBytes ) ) ; }
Reads data from the data stream provided and computes metadata .
32,602
/**
 * Sends a reconstructed block (data plus its checksum metadata) to a
 * datanode over the data-transfer protocol.
 *
 * @param datanode            target datanode as host:port
 * @param blockContents       stream over the reconstructed block data
 * @param metadataIn          stream over the block's checksum metadata
 * @param block               block identity (id / generation stamp)
 * @param blockSize           number of data bytes to send
 * @param dataTransferVersion wire protocol version to speak
 * @param namespaceId         namespace the block belongs to
 * @param progress            progress callback forwarded to the sender
 * @throws IOException on any connection or transfer failure
 */
private void sendReconstructedBlock(String datanode,
    final FileInputStream blockContents, final DataInputStream metadataIn,
    Block block, long blockSize, int dataTransferVersion, int namespaceId,
    Progressable progress) throws IOException {
  InetSocketAddress target = NetUtils.createSocketAddr(datanode);
  Socket sock = SocketChannel.open().socket();
  int readTimeout = getConf().getInt(BlockIntegrityMonitor.BLOCKFIX_READ_TIMEOUT,
      HdfsConstants.READ_TIMEOUT);
  NetUtils.connect(sock, target, readTimeout);
  sock.setSoTimeout(readTimeout);
  int writeTimeout = getConf().getInt(BlockIntegrityMonitor.BLOCKFIX_WRITE_TIMEOUT,
      HdfsConstants.WRITE_TIMEOUT);
  OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
  DataOutputStream out = new DataOutputStream(
      new BufferedOutputStream(baseStream, FSConstants.SMALL_BUFFER_SIZE));
  // We are pushing known-good reconstructed bytes, so checksum mismatches
  // are not tolerated and offsets start at zero.
  boolean corruptChecksumOk = false;
  boolean chunkOffsetOK = false;
  boolean verifyChecksum = true;
  boolean transferToAllowed = false;
  try {
    LOG.info("Sending block " + block + " from "
        + sock.getLocalSocketAddress().toString() + " to "
        + sock.getRemoteSocketAddress().toString());
    BlockSender blockSender = new BlockSender(namespaceId, block, blockSize, 0,
        blockSize, corruptChecksumOk, chunkOffsetOK, verifyChecksum,
        transferToAllowed,
        dataTransferVersion >= DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION,
        // Factory that serves the in-memory reconstructed data and metadata
        // instead of on-disk replica files.
        new BlockWithChecksumFileReader.InputStreamWithChecksumFactory() {
          public InputStream createStream(long offset) throws IOException {
            return blockContents;
          }
          public DataInputStream getChecksumStream() throws IOException {
            return metadataIn;
          }
          public BlockDataFile.Reader getBlockDataFileReader() throws IOException {
            return BlockDataFile.getDummyDataFileFromFileChannel(blockContents.getChannel()).
                getReader(null);
          }
        });
    WriteBlockHeader header = new WriteBlockHeader(
        new VersionAndOpcode(dataTransferVersion, DataTransferProtocol.OP_WRITE_BLOCK));
    header.set(namespaceId, block.getBlockId(), block.getGenerationStamp(), 0,
        false, true, new DatanodeInfo(), 0, null, "");
    header.writeVersionAndOpCode(out);
    header.write(out);
    blockSender.sendBlock(out, baseStream, null, progress);
    LOG.info("Sent block " + block + " to " + datanode);
  } finally {
    // NOTE(review): sock is closed before out; if sock.close() throws, out is
    // never closed, and closing the socket first makes out.close() moot —
    // consider reversing the order or using suppressed-exception handling.
    sock.close();
    out.close();
  }
}
Send a generated block to a datanode .
32,603
public static UserGroupInformation login ( Configuration conf ) throws LoginException { if ( LOGIN_UGI == null ) { LOGIN_UGI = UnixUserGroupInformation . login ( conf ) ; } return LOGIN_UGI ; }
Login and return a UserGroupInformation object .
32,604
public static UserGroupInformation getUGI ( Configuration conf ) throws LoginException { UserGroupInformation ugi = null ; if ( conf . getBoolean ( UGI_SOURCE , true ) ) { ugi = UnixUserGroupInformation . readFromConf ( conf , UnixUserGroupInformation . UGI_PROPERTY_NAME ) ; } else { ugi = UserGroupInformation . getCurrentUGI ( ) ; } if ( ugi == null ) { ugi = UnixUserGroupInformation . login ( ) ; UnixUserGroupInformation . saveToConf ( conf , UnixUserGroupInformation . UGI_PROPERTY_NAME , ( UnixUserGroupInformation ) ugi ) ; } return ugi ; }
Get the current user's UserGroupInformation, reading it from the configuration or logging in if necessary.
32,605
public < T > T doAs ( PrivilegedAction < T > action ) { return Subject . doAs ( null , action ) ; }
Run the given action as the user .
32,606
public < T > T doAs ( PrivilegedExceptionAction < T > action ) throws IOException , InterruptedException { try { return Subject . doAs ( null , action ) ; } catch ( PrivilegedActionException pae ) { Throwable cause = pae . getCause ( ) ; LOG . error ( "PriviledgedActionException as:" + this + " cause:" + cause ) ; if ( cause instanceof IOException ) { throw ( IOException ) cause ; } else if ( cause instanceof Error ) { throw ( Error ) cause ; } else if ( cause instanceof RuntimeException ) { throw ( RuntimeException ) cause ; } else if ( cause instanceof InterruptedException ) { throw ( InterruptedException ) cause ; } else { throw new UndeclaredThrowableException ( pae , "Unknown exception in doAs" ) ; } } }
Run the given action as the user potentially throwing an exception .
32,607
public void map ( K1 arg0 , V1 arg1 , OutputCollector < Text , Text > arg2 , Reporter arg3 ) throws IOException { throw new IOException ( "should not be called\n" ) ; }
Do nothing . Should not be called
32,608
public static RaidInfo getFileRaidInfo ( final FileStatus stat , Configuration conf , boolean skipHarChecking ) throws IOException { ParityFilePair ppair = null ; for ( Codec c : Codec . getCodecs ( ) ) { ppair = ParityFilePair . getParityFile ( c , stat , conf , skipHarChecking ) ; if ( ppair != null ) { return new RaidInfo ( c , ppair , c . parityLength ) ; } } return new RaidInfo ( null , ppair , 0 ) ; }
Returns the RAID information (codec and parity file pair) for a given file.
32,609
private static BlockLocation [ ] getParityBlocks ( final Path filePath , final long blockSize , final long numStripes , final RaidInfo raidInfo ) throws IOException { FileSystem parityFS = raidInfo . parityPair . getFileSystem ( ) ; FileStatus parityFileStatus = raidInfo . parityPair . getFileStatus ( ) ; long parityFileLength = parityFileStatus . getLen ( ) ; if ( parityFileLength != numStripes * raidInfo . parityBlocksPerStripe * blockSize ) { throw new IOException ( "expected parity file of length" + ( numStripes * raidInfo . parityBlocksPerStripe * blockSize ) + " but got parity file of length " + parityFileLength ) ; } BlockLocation [ ] parityBlocks = parityFS . getFileBlockLocations ( parityFileStatus , 0L , parityFileLength ) ; if ( parityFS instanceof DistributedFileSystem || parityFS instanceof DistributedRaidFileSystem ) { long parityBlockSize = parityFileStatus . getBlockSize ( ) ; if ( parityBlockSize != blockSize ) { throw new IOException ( "file block size is " + blockSize + " but parity file block size is " + parityBlockSize ) ; } } else if ( parityFS instanceof HarFileSystem ) { LOG . debug ( "HAR FS found" ) ; } else { LOG . warn ( "parity file system is not of a supported type" ) ; } return parityBlocks ; }
gets the parity blocks corresponding to file returns the parity blocks in case of DFS and the part blocks containing parity blocks in case of HAR FS
32,610
private static void checkParityBlocks ( final Path filePath , final Map < Integer , Integer > corruptBlocksPerStripe , final long blockSize , final long startStripeIdx , final long endStripeIdx , final long numStripes , final RaidInfo raidInfo ) throws IOException { BlockLocation [ ] containerBlocks = getParityBlocks ( filePath , blockSize , numStripes , raidInfo ) ; long parityStripeLength = blockSize * ( ( long ) raidInfo . parityBlocksPerStripe ) ; long parityBlocksFound = 0L ; for ( BlockLocation cb : containerBlocks ) { if ( cb . getLength ( ) % blockSize != 0 ) { throw new IOException ( "container block size is not " + "multiple of parity block size" ) ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "found container with offset " + cb . getOffset ( ) + ", length " + cb . getLength ( ) ) ; } for ( long offset = cb . getOffset ( ) ; offset < cb . getOffset ( ) + cb . getLength ( ) ; offset += blockSize ) { long block = offset / blockSize ; int stripe = ( int ) ( offset / parityStripeLength ) ; if ( stripe < 0 ) { continue ; } if ( stripe >= numStripes ) { break ; } parityBlocksFound ++ ; if ( stripe < startStripeIdx || stripe >= endStripeIdx ) { continue ; } if ( isBlockCorrupt ( cb ) ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "parity file for " + filePath . toString ( ) + " corrupt in block " + block + ", stripe " + stripe + "/" + numStripes ) ; } incCorruptBlocksPerStripe ( corruptBlocksPerStripe , stripe ) ; } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "parity file for " + filePath . toString ( ) + " OK in block " + block + ", stripe " + stripe + "/" + numStripes ) ; } } } } long parityBlocksExpected = raidInfo . parityBlocksPerStripe * numStripes ; if ( parityBlocksFound != parityBlocksExpected ) { throw new IOException ( "expected " + parityBlocksExpected + " parity blocks but got " + parityBlocksFound ) ; } }
checks the parity blocks for a given file and modifies corruptBlocksPerStripe accordingly
32,611
public static Path sourcePathFromParityPath ( Path parityPath , FileSystem fs ) throws IOException { String parityPathStr = parityPath . toUri ( ) . getPath ( ) ; for ( Codec codec : Codec . getCodecs ( ) ) { String prefix = codec . getParityPrefix ( ) ; if ( parityPathStr . startsWith ( prefix ) ) { String src = parityPathStr . replaceFirst ( prefix , Path . SEPARATOR ) ; Path srcPath = new Path ( src ) ; if ( fs . exists ( srcPath ) ) { return srcPath ; } } } return null ; }
returns the source file corresponding to a parity file
32,612
public static void updateChunkChecksum ( byte [ ] buf , int checksumOff , int dataOff , int dataLen , DataChecksum checksum ) throws IOException { int bytesPerChecksum = checksum . getBytesPerChecksum ( ) ; int checksumSize = checksum . getChecksumSize ( ) ; int curChecksumOff = checksumOff ; int curDataOff = dataOff ; int numChunks = ( dataLen + bytesPerChecksum - 1 ) / bytesPerChecksum ; int dataLeft = dataLen ; for ( int i = 0 ; i < numChunks ; i ++ ) { int len = Math . min ( dataLeft , bytesPerChecksum ) ; checksum . reset ( ) ; checksum . update ( buf , curDataOff , len ) ; checksum . writeValue ( buf , curChecksumOff , false ) ; curDataOff += len ; curChecksumOff += checksumSize ; dataLeft -= len ; } }
updates the checksum for a buffer
32,613
public static long copyBytesAndGenerateCRC ( InputStream in , OutputStream out , int buffSize , boolean close , IOThrottler throttler ) throws IOException { PrintStream ps = out instanceof PrintStream ? ( PrintStream ) out : null ; byte buf [ ] = new byte [ buffSize ] ; Checksum sum = new NativeCrc32 ( ) ; sum . reset ( ) ; try { if ( throttler != null ) { throttler . throttle ( ( long ) buffSize ) ; } int bytesRead = in . read ( buf ) ; while ( bytesRead >= 0 ) { sum . update ( buf , 0 , bytesRead ) ; out . write ( buf , 0 , bytesRead ) ; if ( ( ps != null ) && ps . checkError ( ) ) { throw new IOException ( "Unable to write to output stream." ) ; } if ( throttler != null ) { throttler . throttle ( ( long ) buffSize ) ; } bytesRead = in . read ( buf ) ; } } finally { if ( close ) { out . close ( ) ; in . close ( ) ; } } return sum . getValue ( ) ; }
Copies from one stream to another and generate CRC checksum .
32,614
public static void writeFully ( FileChannel fc , ByteBuffer buf , long offset ) throws IOException { do { offset += fc . write ( buf , offset ) ; } while ( buf . remaining ( ) > 0 ) ; }
Write a ByteBuffer to a FileChannel at a given offset handling short writes .
32,615
public static void cloneInto ( Writable dst , Writable src ) throws IOException { ReflectionUtils . cloneWritableInto ( dst , src ) ; }
Make a copy of the writable object using serialization to a buffer.
32,616
public static String readStringSafely ( DataInput in , int maxLength ) throws IOException , IllegalArgumentException { int length = readVInt ( in ) ; if ( length < 0 || length > maxLength ) { throw new IllegalArgumentException ( "Encoded byte size for String was " + length + ", which is outside of 0.." + maxLength + " range." ) ; } byte [ ] bytes = new byte [ length ] ; in . readFully ( bytes , 0 , length ) ; return Text . decode ( bytes ) ; }
Read a string but check it for sanity . The format consists of a vint followed by the given number of bytes .
32,617
protected void flushAndSync ( boolean durable ) throws IOException { if ( outputStream == null ) { throw new IOException ( "Trying to use aborted output stream!" ) ; } if ( doubleBuf . isFlushed ( ) ) { return ; } doubleBuf . flushTo ( outputStream ) ; }
Flush the contents of the ready buffer to a durable BookKeeper ledger. After this method returns we are guaranteed to have persisted the log records stored in the ready buffer to a quorum of bookies that store the underlying ledger.
32,618
public void waitForRestart ( ) { if ( standbyThread != null ) { try { standbyThread . join ( ) ; } catch ( InterruptedException ie ) { } standbyThread = null ; LOG . info ( "waitForRestart: Standby thread exited." ) ; InjectionHandler . processEvent ( InjectionEvent . AVATARNODE_WAIT_FOR_RESTART ) ; while ( failoverState == FailoverState . START_FAILOVER || failoverState == FailoverState . AWAIT_FAILOVER ) { LOG . info ( "Current state : " + failoverState + ". Waiting for failover ...." ) ; try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException ie ) { throw new RuntimeException ( "waitForRestart() interrupted" ) ; } } if ( getAvatar ( ) == Avatar . STANDBY ) { runInfo . isRunning = false ; LOG . info ( "waitForRestart Stopping encapsulated namenode." ) ; super . stop ( ) ; super . join ( ) ; shutdownStandby ( ) ; LOG . info ( "waitForRestart exiting" ) ; return ; } } super . join ( ) ; }
Wait for the StandbyNode to exit . If it does then stop the underlying namenode .
32,619
public long getProtocolVersion ( String protocol , long clientVersion ) throws IOException { if ( protocol . equals ( AvatarProtocol . class . getName ( ) ) ) { return AvatarProtocol . versionID ; } else { return super . getProtocolVersion ( protocol , clientVersion ) ; } }
If the specified protocol is AvatarProtocol then return the AvatarProtocol version id otherwise delegate to the underlying namenode .
32,620
private void verifyEditStreams ( ) throws IOException { if ( getFSImage ( ) . getEditLog ( ) . isSharedJournalAvailable ( ) && InjectionHandler . trueCondition ( InjectionEvent . AVATARNODE_CHECKEDITSTREAMS ) ) { return ; } int expectedEditStreams = NNStorageConfiguration . getNamespaceEditsDirs ( confg ) . size ( ) ; int actualEditStreams = this . namesystem . getFSImage ( ) . getEditLog ( ) . getNumberOfAvailableJournals ( ) ; if ( expectedEditStreams == actualEditStreams && InjectionHandler . trueCondition ( InjectionEvent . AVATARNODE_CHECKEDITSTREAMS ) ) { return ; } String msg = "Failover: Cannot proceed - shared journal is not available. " + "Number of required edit streams: " + expectedEditStreams + " current number: " + actualEditStreams ; LOG . fatal ( msg ) ; throw new IOException ( msg ) ; }
Return true if the shared journal is active or if the number of active journals is equal to the number of configured journals . Throw IOException otherwise .
32,621
public synchronized void shutdown ( boolean synchronous ) throws IOException { LOG . info ( "Failover: Asynchronous shutdown for: " + currentAvatar ) ; super . namesystem . checkSuperuserPrivilege ( ) ; if ( runInfo . shutdown ) { LOG . info ( "Failover: Node already shut down" ) ; return ; } verifyEditStreams ( ) ; runInfo . shutdown = true ; Thread shutdownThread = new ShutdownAvatarThread ( this ) ; shutdownThread . setName ( "ShutDown thread for : " + serverAddress ) ; shutdownThread . setDaemon ( false ) ; shutdownThread . start ( ) ; if ( synchronous ) { LOG . info ( "Failover: Waiting for shutdown to complete" ) ; try { shutdownThread . join ( ) ; } catch ( InterruptedException ie ) { throw new IOException ( ie ) ; } } }
Shuts down the avatar node
32,622
protected void stopRPC ( boolean interruptClientHandlers ) throws IOException { try { stopRPCInternal ( server , "avatardatanode" , interruptClientHandlers ) ; super . stopRPC ( interruptClientHandlers ) ; stopWaitRPCInternal ( server , "avatardatanode" ) ; } catch ( InterruptedException ex ) { throw new IOException ( "stopRPC() interrupted" , ex ) ; } }
Stops all RPC threads and ensures that all RPC handlers have exited . Stops all communication to the namenode .
32,623
private static void printUsage ( ) { System . err . println ( "Usage: java AvatarNode [" + StartupOption . STANDBY . getName ( ) + "] | [" + StartupOption . NODEZERO . getName ( ) + "] | [" + StartupOption . NODEONE . getName ( ) + "] | [" + StartupOption . FORMAT . getName ( ) + "] | [" + StartupOption . UPGRADE . getName ( ) + "] | [" + StartupOption . ROLLBACK . getName ( ) + "] | [" + StartupOption . FINALIZE . getName ( ) + "] | [" + StartupOption . IMPORT . getName ( ) + "]" ) ; }
Help message for a user
32,624
static void validateStartupOptions ( StartupInfo startInfo ) throws IOException { if ( startInfo . isStandby ) { if ( startInfo . startOpt == StartupOption . FORMAT || startInfo . startOpt == StartupOption . FINALIZE || startInfo . startOpt == StartupOption . ROLLBACK || startInfo . startOpt == StartupOption . UPGRADE ) { throw new IOException ( "Standby avatar node cannot be started with " + startInfo . startOpt + " option." ) ; } } }
validates command line arguments
32,625
private static StartupInfo parseArguments ( String args [ ] ) { InstanceId instance = InstanceId . NODEZERO ; StartupOption startOpt = StartupOption . REGULAR ; boolean isStandby = false ; String serviceName = null ; boolean force = false ; int argsLen = ( args == null ) ? 0 : args . length ; for ( int i = 0 ; i < argsLen ; i ++ ) { String cmd = args [ i ] ; if ( StartupOption . SERVICE . getName ( ) . equalsIgnoreCase ( cmd ) ) { if ( ++ i < argsLen ) { serviceName = args [ i ] ; } else { return null ; } } else if ( StartupOption . STANDBY . getName ( ) . equalsIgnoreCase ( cmd ) ) { isStandby = true ; } else if ( StartupOption . NODEZERO . getName ( ) . equalsIgnoreCase ( cmd ) ) { instance = InstanceId . NODEZERO ; } else if ( StartupOption . NODEONE . getName ( ) . equalsIgnoreCase ( cmd ) ) { instance = InstanceId . NODEONE ; } else if ( StartupOption . FORMAT . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . FORMAT ; } else if ( StartupOption . FORMATFORCE . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . FORMATFORCE ; } else if ( StartupOption . REGULAR . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . REGULAR ; } else if ( StartupOption . UPGRADE . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . UPGRADE ; } else if ( StartupOption . ROLLBACK . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . ROLLBACK ; } else if ( StartupOption . FINALIZE . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . FINALIZE ; } else if ( StartupOption . IMPORT . getName ( ) . equalsIgnoreCase ( cmd ) ) { startOpt = StartupOption . IMPORT ; } else if ( StartupOption . FORCE . getName ( ) . equalsIgnoreCase ( cmd ) ) { force = true ; } else { return null ; } } return new StartupInfo ( startOpt , instance , isStandby , serviceName , force ) ; }
Analyze the command line options
32,626
public static void initializeGenericKeys ( Configuration conf , String serviceKey ) { if ( ( serviceKey == null ) || serviceKey . isEmpty ( ) ) { return ; } NameNode . initializeGenericKeys ( conf , serviceKey ) ; DFSUtil . setGenericConf ( conf , serviceKey , AVATARSERVICE_SPECIFIC_KEYS ) ; adjustMetaDirectoryNames ( conf , serviceKey ) ; }
In federation configuration is set for a set of avartanodes namenodes etc which are grouped under a logical nameservice ID . The configuration keys specific to them have suffix set to configured nameserviceId .
32,627
public static void adjustMetaDirectoryNames ( Configuration conf , String serviceKey ) { adjustMetaDirectoryName ( conf , DFS_SHARED_NAME_DIR0_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_SHARED_NAME_DIR1_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_SHARED_EDITS_DIR0_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_SHARED_EDITS_DIR1_KEY , serviceKey ) ; }
Append service name to each avatar meta directory name
32,628
private static void isPrimaryAlive ( String zkRegistry ) throws IOException { String parts [ ] = zkRegistry . split ( ":" ) ; if ( parts . length != 2 ) { throw new IllegalArgumentException ( "Invalid Address : " + zkRegistry ) ; } String host = parts [ 0 ] ; int port = Integer . parseInt ( parts [ 1 ] ) ; InetSocketAddress clientSocket = new InetSocketAddress ( host , port ) ; ServerSocket socket = new ServerSocket ( ) ; socket . bind ( clientSocket ) ; socket . close ( ) ; }
Tries to bind to the address specified in ZooKeeper this will always fail if the primary is alive either on the same machine or on a remote machine .
32,629
static InetSocketAddress getRemoteNamenodeAddress ( Configuration conf , InstanceId instance ) throws IOException { String fs = null ; if ( instance == InstanceId . NODEZERO ) { fs = conf . get ( DFS_NAMENODE_RPC_ADDRESS1_KEY ) ; if ( fs == null ) fs = conf . get ( "fs.default.name1" ) ; } else if ( instance == InstanceId . NODEONE ) { fs = conf . get ( DFS_NAMENODE_RPC_ADDRESS0_KEY ) ; if ( fs == null ) fs = conf . get ( "fs.default.name0" ) ; } else { throw new IOException ( "Unknown instance " + instance ) ; } if ( fs != null ) { Configuration newConf = new Configuration ( conf ) ; newConf . set ( FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY , fs ) ; conf = newConf ; } return NameNode . getClientProtocolAddress ( conf ) ; }
Returns the address of the remote namenode
32,630
static String getRemoteNamenodeHttpName ( Configuration conf , InstanceId instance ) throws IOException { if ( instance == InstanceId . NODEZERO ) { return conf . get ( "dfs.http.address1" ) ; } else if ( instance == InstanceId . NODEONE ) { return conf . get ( "dfs.http.address0" ) ; } else { throw new IOException ( "Unknown instance " + instance ) ; } }
Returns the http server address of the remote namenode.
32,631
private void setProxy ( String proxyStr ) { String [ ] strs = proxyStr . split ( ":" , 2 ) ; if ( strs . length != 2 ) throw new RuntimeException ( "Bad SOCKS proxy parameter: " + proxyStr ) ; String host = strs [ 0 ] ; int port = Integer . parseInt ( strs [ 1 ] ) ; this . proxy = new Proxy ( Proxy . Type . SOCKS , InetSocketAddress . createUnresolved ( host , port ) ) ; }
Set the proxy of this socket factory as described in the string parameter
32,632
public void addResourceMetadata ( String resourceName , ResourceMetadata resourceMetadata ) { if ( resourceMetadataMap . put ( resourceName , resourceMetadata ) != null ) { throw new RuntimeException ( "Resource name " + resourceName + " already exists!" ) ; } }
Add resource metadata for this pool .
32,633
public ResourceMetadata getResourceMetadata ( String resourceName ) { if ( ! resourceMetadataMap . containsKey ( resourceName ) ) { throw new RuntimeException ( "No resource metadata for " + resourceName ) ; } return resourceMetadataMap . get ( resourceName ) ; }
Get resource metadata for a resource name
32,634
private void readClusterNodeInfo ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "clusterNodeInfo" ) ; clusterNodeInfo = new ClusterNodeInfo ( ) ; coronaSerializer . readStartObjectToken ( "clusterNodeInfo" ) ; coronaSerializer . readField ( "name" ) ; clusterNodeInfo . name = coronaSerializer . readValueAs ( String . class ) ; coronaSerializer . readField ( "address" ) ; clusterNodeInfo . address = coronaSerializer . readValueAs ( InetAddress . class ) ; coronaSerializer . readField ( "total" ) ; clusterNodeInfo . total = coronaSerializer . readValueAs ( ComputeSpecs . class ) ; coronaSerializer . readField ( "free" ) ; clusterNodeInfo . free = coronaSerializer . readValueAs ( ComputeSpecs . class ) ; coronaSerializer . readField ( "resourceInfos" ) ; clusterNodeInfo . resourceInfos = coronaSerializer . readValueAs ( Map . class ) ; coronaSerializer . readEndObjectToken ( "clusterNodeInfo" ) ; }
Reads the clusterNodeInfo object from the JSON stream
32,635
private void readGrants ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readStartObjectToken ( "grants" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { coronaSerializer . readStartObjectToken ( "grant" ) ; coronaSerializer . readField ( "grantId" ) ; GrantId grantId = new GrantId ( coronaSerializer ) ; coronaSerializer . readField ( "grant" ) ; ResourceRequestInfo resourceRequestInfo = new ResourceRequestInfo ( coronaSerializer ) ; coronaSerializer . readEndObjectToken ( "grant" ) ; addGrant ( grantId . getSessionId ( ) , resourceRequestInfo ) ; current = coronaSerializer . nextToken ( ) ; } }
Reads the list of grants from the JSON stream
32,636
public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeFieldName ( "clusterNodeInfo" ) ; jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeStringField ( "name" , clusterNodeInfo . name ) ; jsonGenerator . writeObjectField ( "address" , clusterNodeInfo . address ) ; jsonGenerator . writeObjectField ( "total" , clusterNodeInfo . total ) ; jsonGenerator . writeObjectField ( "free" , clusterNodeInfo . free ) ; jsonGenerator . writeObjectField ( "resourceInfos" , clusterNodeInfo . resourceInfos ) ; jsonGenerator . writeEndObject ( ) ; jsonGenerator . writeFieldName ( "grants" ) ; jsonGenerator . writeStartObject ( ) ; for ( Map . Entry < GrantId , ResourceRequestInfo > entry : grants . entrySet ( ) ) { jsonGenerator . writeFieldName ( entry . getKey ( ) . unique ) ; jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeFieldName ( "grantId" ) ; entry . getKey ( ) . write ( jsonGenerator ) ; jsonGenerator . writeFieldName ( "grant" ) ; entry . getValue ( ) . write ( jsonGenerator ) ; jsonGenerator . writeEndObject ( ) ; } jsonGenerator . writeEndObject ( ) ; jsonGenerator . writeEndObject ( ) ; }
Used to write the state of the ClusterNode instance to disk when we are persisting the state of the NodeManager
32,637
public void initResourceTypeToMaxCpuMap ( Map < Integer , Map < ResourceType , Integer > > cpuToResourcePartitioning ) { resourceTypeToMaxCpu = getResourceTypeToCountMap ( ( int ) clusterNodeInfo . total . numCpus , cpuToResourcePartitioning ) ; }
This method is used to initialize the resource type to max CPU mapping based upon the cpuToResourcePartitioning instance given
32,638
public static Map < ResourceType , Integer > getResourceTypeToCountMap ( int numCpus , Map < Integer , Map < ResourceType , Integer > > cpuToResourcePartitioning ) { Map < ResourceType , Integer > ret = cpuToResourcePartitioning . get ( numCpus ) ; if ( ret == null ) { Map < ResourceType , Integer > oneCpuMap = cpuToResourcePartitioning . get ( 1 ) ; if ( oneCpuMap == null ) { throw new RuntimeException ( "No matching entry for cpu count: " + numCpus + " in node and no 1 cpu map" ) ; } ret = new EnumMap < ResourceType , Integer > ( ResourceType . class ) ; for ( ResourceType key : oneCpuMap . keySet ( ) ) { ret . put ( key , oneCpuMap . get ( key ) . intValue ( ) * numCpus ) ; } } return ret ; }
Get a mapping of the resource type to amount of resources for a given number of cpus .
32,639
/**
 * Process the INodes-under-construction section of the fsimage.
 *
 * The image is an unframed DataInputStream: every read consumes bytes
 * positionally, so the read sequence below must exactly match the writer's
 * layout and must not be reordered.
 *
 * @param in         image stream positioned at the section's record count
 * @param v          visitor receiving the decoded elements
 * @param skipBlocks whether block details should be skipped rather than visited
 * @throws IOException on read error or malformed image data
 */
private void processINodesUC ( DataInputStream in , ImageVisitor v , boolean skipBlocks ) throws IOException {
  int numINUC = in . readInt ( ) ;
  v . visitEnclosingElement ( ImageElement . INODES_UNDER_CONSTRUCTION , ImageElement . NUM_INODES_UNDER_CONSTRUCTION , numINUC ) ;
  for ( int i = 0 ; i < numINUC ; i ++ ) {
    // Allow cancellation between records.
    checkInterruption ( ) ;
    v . visitEnclosingElement ( ImageElement . INODE_UNDER_CONSTRUCTION ) ;
    byte [ ] name = FSImageSerialization . readBytes ( in ) ;
    String n = new String ( name , "UTF8" ) ;
    v . visit ( ImageElement . INODE_PATH , n ) ;
    // Inode ids only exist on layouts that support them.
    if ( LayoutVersion . supports ( Feature . ADD_INODE_ID , imageVersion ) ) {
      v . visit ( ImageElement . INODE_ID , in . readLong ( ) ) ;
    }
    v . visit ( ImageElement . REPLICATION , in . readShort ( ) ) ;
    v . visit ( ImageElement . MODIFICATION_TIME , formatDate ( in . readLong ( ) ) ) ;
    v . visit ( ImageElement . PREFERRED_BLOCK_SIZE , in . readLong ( ) ) ;
    int numBlocks = in . readInt ( ) ;
    processBlocks ( in , v , numBlocks , skipBlocks ) ;
    processPermission ( in , v ) ;
    v . visit ( ImageElement . CLIENT_NAME , FSImageSerialization . readString ( in ) ) ;
    v . visit ( ImageElement . CLIENT_MACHINE , FSImageSerialization . readString ( in ) ) ;
    // The target-location records are read to advance the stream but their
    // contents are deliberately discarded (not visited).
    int numLocs = in . readInt ( ) ;
    for ( int j = 0 ; j < numLocs ; j ++ ) {
      in . readShort ( ) ;
      in . readLong ( ) ;
      in . readLong ( ) ;
      in . readLong ( ) ;
      in . readInt ( ) ;
      FSImageSerialization . readString ( in ) ;
      FSImageSerialization . readString ( in ) ;
      WritableUtils . readEnum ( in , AdminStates . class ) ;
    }
    v . leaveEnclosingElement ( ) ;
  }
  v . leaveEnclosingElement ( ) ;
}
Process the INodes under construction section of the fsimage .
32,640
/**
 * Process the blocks section of an inode record in the fsimage.
 *
 * @param in         image stream positioned at the first block record
 * @param v          visitor receiving the decoded elements
 * @param numBlocks  block count, or a sentinel: -1 (directory with quotas) or
 *                   -2 (symlink) — see processINode — meaning no block records
 * @param skipBlocks if true, skip over the block bytes instead of visiting them
 * @throws IOException on read error or if the skip falls short
 */
private void processBlocks ( DataInputStream in , ImageVisitor v , int numBlocks , boolean skipBlocks ) throws IOException {
  v . visitEnclosingElement ( ImageElement . BLOCKS , ImageElement . NUM_BLOCKS , numBlocks ) ;
  // Sentinel counts carry no block records.
  if ( numBlocks == - 1 || numBlocks == - 2 ) {
    v . leaveEnclosingElement ( ) ;
    return ;
  }
  if ( skipBlocks ) {
    // Each block record is three longs, plus an int checksum on newer layouts.
    // fieldsBytes accumulates BITS (Long.SIZE/Integer.SIZE are bit counts);
    // the /8 below converts to bytes.
    int fieldsBytes = Long . SIZE * 3 ;
    if ( LayoutVersion . supports ( Feature . BLOCK_CHECKSUM , imageVersion ) ) {
      fieldsBytes += Integer . SIZE ;
    }
    int bytesToSkip = ( ( fieldsBytes ) / 8 ) * numBlocks ;
    if ( in . skipBytes ( bytesToSkip ) != bytesToSkip )
      throw new IOException ( "Error skipping over blocks" ) ;
  } else {
    for ( int j = 0 ; j < numBlocks ; j ++ ) {
      v . visitEnclosingElement ( ImageElement . BLOCK ) ;
      v . visit ( ImageElement . BLOCK_ID , in . readLong ( ) ) ;
      v . visit ( ImageElement . NUM_BYTES , in . readLong ( ) ) ;
      v . visit ( ImageElement . GENERATION_STAMP , in . readLong ( ) ) ;
      if ( LayoutVersion . supports ( Feature . BLOCK_CHECKSUM , imageVersion ) ) {
        v . visit ( ImageElement . BLOCK_CHECKSUM , in . readInt ( ) ) ;
      }
      v . leaveEnclosingElement ( ) ;
    }
  }
  v . leaveEnclosingElement ( ) ;
}
Process the blocks section of the fsimage .
32,641
/**
 * Extract the INode permissions stored in the fsimage file.
 * Reads, in stream order: user name, group name, then a short encoding the
 * FsPermission bits; the decoded bits are visited as a permission string.
 *
 * @param in image stream positioned at a permission record
 * @param v  visitor receiving the decoded elements
 * @throws IOException on read error
 */
private void processPermission ( DataInputStream in , ImageVisitor v ) throws IOException {
  v . visitEnclosingElement ( ImageElement . PERMISSIONS ) ;
  v . visit ( ImageElement . USER_NAME , Text . readStringOpt ( in ) ) ;
  v . visit ( ImageElement . GROUP_NAME , Text . readStringOpt ( in ) ) ;
  FsPermission fsp = new FsPermission ( in . readShort ( ) ) ;
  v . visit ( ImageElement . PERMISSION_STRING , fsp . toString ( ) ) ;
  v . leaveEnclosingElement ( ) ;
}
Extract the INode permissions stored in the fsimage file .
32,642
/**
 * Process the INode records stored in the fsimage, dispatching to the
 * local-name or full-name decoder depending on the image layout version.
 *
 * @param in         image stream positioned at the first inode record
 * @param v          visitor receiving the decoded elements
 * @param numInodes  total number of inode records to decode
 * @param skipBlocks whether block details should be skipped
 * @throws IOException on read error or malformed image data
 */
private void processINodes ( DataInputStream in , ImageVisitor v , long numInodes , boolean skipBlocks ) throws IOException {
  v . visitEnclosingElement ( ImageElement . INODES , ImageElement . NUM_INODES , numInodes ) ;
  // Newer layouts store only the last path component per inode.
  boolean localNames = LayoutVersion . supports ( Feature . FSIMAGE_NAME_OPTIMIZATION , imageVersion ) ;
  if ( localNames ) {
    processLocalNameINodes ( in , v , numInodes , skipBlocks ) ;
  } else {
    processFullNameINodes ( in , v , numInodes , skipBlocks ) ;
  }
  v . leaveEnclosingElement ( ) ;
}
Process the INode records stored in the fsimage .
32,643
/**
 * Process a single INode record from the fsimage.
 *
 * The reads are strictly positional; their order mirrors the image writer and
 * must not change. Optional fields are gated on the layout version.
 *
 * @param in         image stream positioned at an inode record
 * @param v          visitor receiving the decoded elements
 * @param skipBlocks whether block details should be skipped
 * @param parentName absolute parent path when the image stores local names
 *                   only; null when records carry full paths
 * @throws IOException on read error or malformed image data
 */
private void processINode ( DataInputStream in , ImageVisitor v , boolean skipBlocks , String parentName ) throws IOException {
  checkInterruption ( ) ;
  v . visitEnclosingElement ( ImageElement . INODE ) ;
  String pathName = FSImageSerialization . readString ( in ) ;
  // With local-name optimization the record holds only the last component;
  // rebuild the absolute path from the parent.
  if ( parentName != null ) {
    pathName = "/" + pathName ;
    if ( ! "/" . equals ( parentName ) ) {
      pathName = parentName + pathName ;
    }
  }
  v . visit ( ImageElement . INODE_PATH , pathName ) ;
  if ( LayoutVersion . supports ( Feature . ADD_INODE_ID , imageVersion ) ) {
    v . visit ( ImageElement . INODE_ID , in . readLong ( ) ) ;
  }
  // Hardlink-capable layouts prefix each inode with a type byte.
  if ( LayoutVersion . supports ( Feature . HARDLINK , imageVersion ) ) {
    byte inodeType = in . readByte ( ) ;
    if ( inodeType == INode . INodeType . HARDLINKED_INODE . type ) {
      v . visit ( ImageElement . INODE_TYPE , INode . INodeType . HARDLINKED_INODE . toString ( ) ) ;
      long hardlinkID = WritableUtils . readVLong ( in ) ;
      v . visit ( ImageElement . INODE_HARDLINK_ID , hardlinkID ) ;
    } else if ( inodeType == INode . INodeType . RAIDED_INODE . type ) {
      v . visit ( ImageElement . INODE_TYPE , INode . INodeType . RAIDED_INODE . toString ( ) ) ;
      String codecId = WritableUtils . readString ( in ) ;
      v . visit ( ImageElement . RAID_CODEC_ID , codecId ) ;
    } else {
      v . visit ( ImageElement . INODE_TYPE , INode . INodeType . REGULAR_INODE . toString ( ) ) ;
    }
  }
  v . visit ( ImageElement . REPLICATION , in . readShort ( ) ) ;
  v . visit ( ImageElement . MODIFICATION_TIME , in . readLong ( ) ) ;
  if ( LayoutVersion . supports ( Feature . FILE_ACCESS_TIME , imageVersion ) )
    v . visit ( ImageElement . ACCESS_TIME , in . readLong ( ) ) ;
  v . visit ( ImageElement . BLOCK_SIZE , in . readLong ( ) ) ;
  int numBlocks = in . readInt ( ) ;
  processBlocks ( in , v , numBlocks , skipBlocks ) ;
  // numBlocks == -1 marks a directory record carrying quotas; -2 a symlink.
  if ( numBlocks > 0 || numBlocks == - 1 ) {
    v . visit ( ImageElement . NS_QUOTA , numBlocks == - 1 ? in . readLong ( ) : - 1 ) ;
    if ( LayoutVersion . supports ( Feature . DISKSPACE_QUOTA , imageVersion ) )
      v . visit ( ImageElement . DS_QUOTA , numBlocks == - 1 ? in . readLong ( ) : - 1 ) ;
  }
  if ( numBlocks == - 2 ) {
    v . visit ( ImageElement . SYMLINK , Text . readString ( in ) ) ;
  }
  processPermission ( in , v ) ;
  v . leaveEnclosingElement ( ) ;
}
Process an INode
32,644
/**
 * Fence any previous writers and obtain a unique epoch number for
 * write-access to the journal nodes.
 *
 * Two phases: first a write quorum reports the highest epoch each node has
 * promised; then this writer claims max+1 via newEpoch(), which a quorum must
 * accept before it becomes this client's epoch.
 *
 * @return per-logger responses to the successful newEpoch() call
 * @throws IOException if either quorum call fails or times out
 */
Map < AsyncLogger , NewEpochResponseProto > createNewUniqueEpoch ( ) throws IOException {
  // An epoch may only be established once per instance.
  Preconditions . checkState ( ! loggers . isEpochEstablished ( ) , "epoch already created" ) ;
  // Phase 1: learn the highest epoch any quorum member has promised.
  Map < AsyncLogger , GetJournalStateResponseProto > lastPromises =
      loggers . waitForWriteQuorum ( loggers . getJournalState ( ) , getJournalStateTimeoutMs , "getJournalState()" ) ;
  long maxPromised = Long . MIN_VALUE ;
  for ( GetJournalStateResponseProto resp : lastPromises . values ( ) ) {
    maxPromised = Math . max ( maxPromised , resp . getLastPromisedEpoch ( ) ) ;
  }
  assert maxPromised >= 0 ;
  // Phase 2: claim the next epoch; quorum acceptance fences older writers.
  long myEpoch = maxPromised + 1 ;
  Map < AsyncLogger , NewEpochResponseProto > resps =
      loggers . waitForWriteQuorum ( loggers . newEpoch ( nsInfo , myEpoch ) , newEpochTimeoutMs , "newEpoch(" + myEpoch + ")" ) ;
  loggers . setEpoch ( myEpoch ) ;
  return resps ;
}
Fence any previous writers and obtain a unique epoch number for write - access to the journal nodes .
32,645
/**
 * Checks if any data is available in the underlying storage. Waits for a
 * response from every journal node, then returns true if any of them reports
 * data of the requested kind.
 *
 * @param image true to check for a formatted image, false for a formatted journal
 * @return true if at least one journal node has the requested data
 * @throws IOException on interruption, timeout, or quorum failure
 */
private boolean hasSomeDataInternal ( boolean image ) throws IOException {
  QuorumCall < AsyncLogger , Boolean > call =
      image ? loggers . isImageFormatted ( ) : loggers . isJournalFormatted ( ) ;
  try {
    // Require answers from ALL loggers (quorum size == loggers.size()).
    call . waitFor ( loggers . size ( ) , 0 , 0 , hasDataTimeoutMs , "hasSomeData" ) ;
  } catch ( InterruptedException e ) {
    // Restore the interrupt status before translating to IOException, and
    // keep the original exception as the cause.
    Thread . currentThread ( ) . interrupt ( ) ;
    throw new IOException ( "Interrupted while determining if JNs have data" , e ) ;
  } catch ( TimeoutException e ) {
    throw new IOException ( "Timed out waiting for response from loggers" , e ) ;
  }
  if ( call . countExceptions ( ) > 0 ) {
    call . throwQuorumException ( "Unable to check if JNs are ready for formatting" ) ;
  }
  for ( Boolean hasData : call . getResults ( ) . values ( ) ) {
    if ( hasData ) {
      return true ;
    }
  }
  return false ;
}
Checks if any data is available in the underlying storage . Returns true if any of the nodes has some data .
32,646
/**
 * Select edit-log input streams starting at fromTxnId from a read quorum of
 * journal nodes. inProgressOk should be true only for tailing, not for
 * startup.
 *
 * NOTE(review): validateInProgressSegments is currently unused in this body —
 * confirm whether validation was meant to happen here.
 *
 * @param streams      output collection the chained streams are added to
 * @param fromTxnId    first transaction id the caller needs
 * @param inProgressOk whether in-progress (unfinalized) segments may be used
 * @param validateInProgressSegments unused here
 * @throws IOException if the quorum call fails
 */
public void selectInputStreams ( Collection < EditLogInputStream > streams , long fromTxnId , boolean inProgressOk , boolean validateInProgressSegments ) throws IOException {
  QuorumCall < AsyncLogger , RemoteEditLogManifest > q = loggers . getEditLogManifest ( fromTxnId ) ;
  Map < AsyncLogger , RemoteEditLogManifest > resps =
      loggers . waitForReadQuorumWithAllResponses ( q , selectInputStreamsTimeoutMs , "selectInputStreams" ) ;
  if ( LOG . isDebugEnabled ( ) ) {
    LOG . debug ( "selectInputStream manifests:\n" + Joiner . on ( "\n" ) . withKeyValueSeparator ( ": " ) . join ( resps ) ) ;
  }
  // Order candidate streams (by the journal-set comparator) so they can be
  // chained and de-duplicated below.
  final PriorityQueue < EditLogInputStream > allStreams =
      new PriorityQueue < EditLogInputStream > ( 64 , JournalSet . EDIT_LOG_INPUT_STREAM_COMPARATOR ) ;
  for ( Map . Entry < AsyncLogger , RemoteEditLogManifest > e : resps . entrySet ( ) ) {
    AsyncLogger logger = e . getKey ( ) ;
    RemoteEditLogManifest manifest = e . getValue ( ) ;
    for ( RemoteEditLog remoteLog : manifest . getLogs ( ) ) {
      EditLogInputStream elis = new URLLogInputStream ( logger , remoteLog . getStartTxId ( ) , httpConnectReadTimeoutMs ) ;
      // Drop unfinalized segments unless the caller explicitly allows them.
      if ( elis . isInProgress ( ) && ! inProgressOk ) {
        continue ;
      }
      allStreams . add ( elis ) ;
    }
  }
  JournalSet . chainAndMakeRedundantStreams ( streams , allStreams , fromTxnId , inProgressOk , 0 ) ;
}
Select input streams. inProgressOk should be true only for tailing, not for startup.
32,647
/**
 * Creates an output stream that uploads the checkpoint image for the given
 * transaction id to the underlying quorum of journal nodes over HTTP.
 *
 * @param txid transaction id of the image being uploaded
 * @return stream that fans the image bytes out to the journal nodes
 * @throws IOException if the upload stream cannot be created
 */
public OutputStream getCheckpointOutputStream ( long txid ) throws IOException {
  final long epoch = loggers . getEpoch ( ) ;
  return new HttpImageUploadStream ( httpAddresses , journalId , nsInfo , txid ,
      epoch , imageUploadBufferSize , imageUploadMaxBufferedChunks ) ;
}
Creates output stream for image at txid to the underlying quorum of journal nodes .
32,648
/**
 * Saves the md5 digest for the image at txid and renames it into place on the
 * journal nodes. This is a quorum operation: it succeeds only on nodes that
 * consumed the uploaded image in its entirety.
 *
 * @param txid   transaction id of the image
 * @param digest md5 of the uploaded image
 * @return true if a write quorum succeeded, false on any IOException
 */
public boolean saveDigestAndRenameCheckpointImage ( long txid , MD5Hash digest ) {
  try {
    LOG . info ( "Saving md5: " + digest + " for txid: " + txid ) ;
    QuorumCall < AsyncLogger , Void > call =
        loggers . saveDigestAndRenameCheckpointImage ( txid , digest ) ;
    String opName = "saveDigestAndRenameCheckpointImage(" + txid + ")" ;
    loggers . waitForWriteQuorum ( call , writeTxnsTimeoutMs , opName ) ;
    return true ;
  } catch ( IOException e ) {
    // Failure is reported to the caller as false rather than propagated.
    LOG . error ( "Exception when rolling the image:" , e ) ;
    return false ;
  }
}
Roll image and save md5 digest to the underlying nodes . This is a quorum roll and we ensure that it can succeed only on the nodes that consumed entirely the uploaded image .
32,649
/**
 * Get the manifest of images stored on the journal nodes, starting at
 * fromTxnId. Waits for a read quorum with all responses, then merges the
 * per-node manifests into a single validated, sorted manifest.
 *
 * @param fromTxnId lowest transaction id of interest
 * @return merged manifest of images present on a majority of nodes
 * @throws IOException if the quorum call fails or manifests conflict
 */
public RemoteImageManifest getImageManifest ( long fromTxnId ) throws IOException {
  QuorumCall < AsyncLogger , RemoteImageManifest > call = loggers . getImageManifest ( fromTxnId ) ;
  Map < AsyncLogger , RemoteImageManifest > responses = loggers . waitForReadQuorumWithAllResponses (
      call , getImageManifestTimeoutMs , "getImageManifest" ) ;
  return createImageManifest ( responses . values ( ) ) ;
}
Get manifest for the images stored in journal nodes . An image is considered valid if it appears in majority of the nodes with a valid md5 sum . The returned images are sorted according to their transaction id .
32,650
/**
 * Concatenate manifests obtained from the underlying journal nodes into one.
 * Images with no md5 digest are ignored; when the same txid appears on
 * multiple nodes the records must be equal, otherwise an IOException is
 * thrown. The result is sorted by transaction id.
 *
 * @param resps per-node manifests to merge
 * @return merged, sorted manifest
 * @throws IOException if two nodes disagree about the same txid
 */
static RemoteImageManifest createImageManifest ( Collection < RemoteImageManifest > resps ) throws IOException {
  Map < Long , RemoteImage > images = Maps . newHashMap ( ) ;
  for ( RemoteImageManifest rm : resps ) {
    for ( RemoteImage ri : rm . getImages ( ) ) {
      if ( ri . getDigest ( ) == null ) {
        LOG . info ( "Skipping: " + ri + " as it does not have md5 digest" ) ;
        continue ;
      }
      // Single lookup instead of containsKey()+get().
      RemoteImage existing = images . get ( ri . getTxId ( ) ) ;
      if ( existing == null ) {
        images . put ( ri . getTxId ( ) , ri ) ;
      } else if ( ! existing . equals ( ri ) ) {
        throw new IOException ( "Images received from different nodes do not match: "
            + existing + " vs: " + ri ) ;
      }
    }
  }
  // Copy-constructor form replaces the manual element-by-element loop.
  List < RemoteImage > result = Lists . newArrayList ( images . values ( ) ) ;
  Collections . sort ( result ) ;
  return new RemoteImageManifest ( result ) ;
}
Concatenate manifests obtained from the underlying journalnodes . The final manifest will contain only the images committed to the majority of the nodes . Images with no md5 associated are ignored . Also the md5 must match between images from different journal nodes .
32,651
/**
 * Get an input stream for the image with the given txid from one of the
 * journal nodes.
 *
 * @param txid transaction id of the wanted image
 * @return stream wrapper carrying the image bytes, digest, and size
 * @throws IOException if no node can provide the image
 */
public ImageInputStream getImageInputStream ( long txid ) throws IOException {
  URLImageInputStream stream = loggers . getImageInputStream ( txid , httpConnectReadTimeoutMs ) ;
  if ( stream == null ) {
    throw new IOException ( "Cannot obtain input stream for image: " + txid ) ;
  }
  MD5Hash digest = stream . getImageDigest ( ) ;
  return new ImageInputStream ( txid , stream , digest , stream . toString ( ) , stream . getSize ( ) ) ;
}
Get input stream to one of the nodes for given txid .
32,652
/**
 * Constructs an MD5Hash, reads its fields from the given input, and returns it.
 *
 * @param in source to deserialize from
 * @return the newly read hash
 * @throws IOException on read error
 */
public static MD5Hash read ( DataInput in ) throws IOException {
  MD5Hash result = new MD5Hash ( ) ;
  result . readFields ( in ) ;
  return result ;
}
Constructs an instance, reads its fields from the input, and returns it.
32,653
/**
 * Construct a hash value for the content read from the InputStream, also
 * tracking the total number of bytes consumed.
 *
 * @param in stream to digest; read to EOF but not closed
 * @return hash of the stream contents together with the byte count
 * @throws IOException on read error
 */
public static MD5Hash digest ( InputStream in ) throws IOException {
  final MessageDigest digester = DIGESTER_FACTORY . get ( ) ;
  final byte [ ] buf = new byte [ 4 * 1024 ] ;
  int totalBytes = 0 ;
  int n ;
  while ( ( n = in . read ( buf ) ) != - 1 ) {
    digester . update ( buf , 0 , n ) ;
    totalBytes += n ;
  }
  return new MD5Hash ( digester . digest ( ) , totalBytes ) ;
}
Construct a hash value for the content from the InputStream .
32,654
/**
 * Construct a hash value for a slice of a byte array.
 *
 * @param data  source bytes
 * @param start offset of the first byte to hash
 * @param len   number of bytes to hash
 * @return md5 of the given slice
 */
public static MD5Hash digest ( byte [ ] data , int start , int len ) {
  MessageDigest digester = DIGESTER_FACTORY . get ( ) ;
  digester . update ( data , start , len ) ;
  return new MD5Hash ( digester . digest ( ) ) ;
}
Construct a hash value for a byte array .
32,655
/**
 * Forcibly drains and deletes every queued path. Intended for task tracker
 * shutdown; currently only called from MiniCoronaCluster so unit tests run in
 * a clean fixture.
 *
 * Returns when the queue is empty (50ms poll timeout) or this thread is
 * interrupted.
 */
public void forceClean ( ) {
  while ( true ) {
    PathDeletionContext context = null ;
    try {
      context = cleanupThread . queue . poll ( 50L , TimeUnit . MILLISECONDS ) ;
      if ( context == null ) {
        // Queue drained — nothing left to delete.
        return ;
      }
      if ( ! deletePath ( context ) ) {
        LOG . warn ( "forceClean:Unable to delete path " + context . fullPath ) ;
      } else {
        // Fixed log-message typo: was "foceClean DELETED".
        LOG . info ( "forceClean DELETED " + context . fullPath ) ;
      }
    } catch ( InterruptedException e ) {
      return ;
    } catch ( Exception e ) {
      // Best-effort cleanup: log and keep draining the remaining paths.
      LOG . warn ( "Error deleting path " + context . fullPath + ": " + e ) ;
    }
  }
}
Forcibly cleans all queued paths. It should be called when the task tracker shuts down. Currently it is only called in MiniCoronaCluster to make sure each unit test runs in a clean fixture.
32,656
/**
 * Stops the collector: clears the running flag, stops the RPC server if one
 * was started, and interrupts the aggregation daemon so it can exit.
 */
public void stop ( ) {
  running = false ;
  if ( server != null )
    server . stop ( ) ;
  if ( aggregateDaemon != null )
    aggregateDaemon . interrupt ( ) ;
}
Stop all Collector threads and wait for all to finish .
32,657
/**
 * Implements CollectorProtocol: returns the utilization report for the given
 * task tracker host, or null if no report is known.
 *
 * NOTE(review): the map is consulted twice; if another thread removes the
 * entry between the null check and the second get(), this throws NPE.
 * Consider caching the first get() in a local — TODO confirm the map's value
 * type and whether it is mutated concurrently.
 *
 * @param hostName task tracker host to look up
 * @return the host's utilization, or null if unknown
 * @throws IOException declared by the protocol interface
 */
public TaskTrackerUtilization getTaskTrackerUtilization ( String hostName ) throws IOException {
  if ( taskTrackerReports . get ( hostName ) == null ) {
    return null ;
  }
  return taskTrackerReports . get ( hostName ) . getTaskTrackerUtilization ( ) ;
}
Implement CollectorProtocol methods
32,658
/**
 * Main program to run on the Collector server: prints the startup banner,
 * starts a UtilizationCollector, and blocks until it terminates. Any failure
 * is logged and exits the process with status 1.
 *
 * @param argv command-line arguments (passed to the startup message only)
 * @throws Exception never in practice; Throwables are caught and logged
 */
public static void main ( String argv [ ] ) throws Exception {
  StringUtils . startupShutdownMessage ( UtilizationCollector . class , argv , LOG ) ;
  try {
    Configuration conf = new Configuration ( ) ;
    UtilizationCollector collector = new UtilizationCollector ( conf ) ;
    // 'new' can never yield null, so the old null guard was dead code.
    collector . join ( ) ;
  } catch ( Throwable e ) {
    LOG . error ( StringUtils . stringifyException ( e ) ) ;
    System . exit ( - 1 ) ;
  }
}
main program to run on the Collector server
32,659
/**
 * Verifies that every source path exists on its filesystem.
 *
 * @param conf  configuration used to resolve each path's filesystem
 * @param paths source paths to check
 * @throws FileNotFoundException for the first path that does not exist
 * @throws IOException on filesystem access errors
 */
private static void checkPaths ( Configuration conf , List < Path > paths ) throws IOException {
  for ( Path path : paths ) {
    FileSystem pathFs = path . getFileSystem ( conf ) ;
    boolean exists = pathFs . exists ( path ) ;
    if ( ! exists ) {
      throw new FileNotFoundException ( "Source " + path + " does not exist." ) ;
    }
  }
}
check the src paths
32,660
/**
 * Truncates the root prefix from a full path, yielding the path relative to
 * root (always beginning with the separator).
 *
 * @param fullPath path assumed to live under root
 * @param root     prefix to strip
 * @return "/" when depths are equal, the "/"-rooted relative path when
 *         fullPath is deeper, or null when fullPath is shallower than root
 */
private Path relPathToRoot ( Path fullPath , Path root ) {
  final Path separator = new Path ( Path . SEPARATOR ) ;
  int extraDepth = fullPath . depth ( ) - root . depth ( ) ;
  if ( extraDepth == 0 ) {
    return separator ;
  }
  if ( extraDepth < 0 ) {
    // fullPath is not under root.
    return null ;
  }
  // Walk upward, prepending each ancestor's name until root depth is reached.
  Path rel = new Path ( fullPath . getName ( ) ) ;
  Path ancestor = fullPath . getParent ( ) ;
  for ( int i = 1 ; i < extraDepth ; i ++ ) {
    rel = new Path ( ancestor . getName ( ) , rel ) ;
    ancestor = ancestor . getParent ( ) ;
  }
  return new Path ( separator , rel ) ;
}
truncate the prefix root from the full path
32,661
/**
 * Best-effort removal of the temporary job directory; failures are logged
 * and otherwise ignored.
 */
private void cleanJobDirectory ( ) {
  try {
    FileSystem jobfs = jobDirectory . getFileSystem ( conf ) ;
    boolean recursive = true ;
    jobfs . delete ( jobDirectory , recursive ) ;
  } catch ( IOException ioe ) {
    // Deliberately non-fatal: leftover temp data is harmless.
    LOG . warn ( "Unable to clean tmp directory " + jobDirectory , ioe ) ;
  }
}
delete the tmp job directory
32,662
/**
 * Archive the given source paths into the destination HAR by configuring and
 * running a MapReduce job.
 *
 * In append mode the existing _index is moved aside and fed to a converting
 * mapper alongside the new files; otherwise a fresh archive is created. The
 * temporary job directory is cleaned afterwards.
 *
 * @param parentPath common parent of the source paths (becomes archive root)
 * @param srcPaths   files/directories to archive; must all exist
 * @param outputPath destination HAR path
 * @param append     true to append to an existing HAR, false to create anew
 * @throws IOException on invalid output layout or job failure
 */
private void archive ( Path parentPath , List < Path > srcPaths , Path outputPath , boolean append ) throws IOException {
  parentPath = parentPath . makeQualified ( parentPath . getFileSystem ( conf ) ) ;
  checkPaths ( conf , srcPaths ) ;
  Path destinationDir = outputPath . getParent ( ) ;
  FileOutputFormat . setOutputPath ( conf , outputPath ) ;
  FileSystem outFs = outputPath . getFileSystem ( conf ) ;
  if ( append ) {
    // Appending requires an existing HAR laid out as a directory.
    // Error messages below previously lacked a space before the verb
    // ("<path>doesn't exist"); fixed.
    if ( ! outFs . exists ( outputPath ) ) {
      throw new IOException ( "Invalid Output. HAR File " + outputPath + " doesn't exist" ) ;
    }
    if ( outFs . isFile ( outputPath ) ) {
      throw new IOException ( "Invalid Output. HAR File " + outputPath + " must be represented as directory" ) ;
    }
  } else {
    if ( outFs . exists ( outputPath ) ) {
      throw new IOException ( "Invalid Output: " + outputPath + ". File already exists" ) ;
    }
    if ( outFs . isFile ( destinationDir ) ) {
      throw new IOException ( "Invalid Output. " + outputPath + " is not a directory" ) ;
    }
  }
  // Write the file list the mappers will consume and size the job by bytes.
  long totalSize = writeFilesToProcess ( parentPath , srcPaths ) ;
  FileSystem fs = parentPath . getFileSystem ( conf ) ;
  conf . set ( SRC_LIST_LABEL , srcFiles . toString ( ) ) ;
  conf . set ( SRC_PARENT_LABEL , parentPath . makeQualified ( fs ) . toString ( ) ) ;
  conf . setLong ( TOTAL_SIZE_LABEL , totalSize ) ;
  long partSize = conf . getLong ( HAR_PARTSIZE_LABEL , HAR_PARTSIZE_DEFAULT ) ;
  // One map per part-file's worth of data, at least one map overall.
  int numMaps = ( int ) ( totalSize / partSize ) ;
  conf . setNumMapTasks ( numMaps == 0 ? 1 : numMaps ) ;
  conf . setNumReduceTasks ( 1 ) ;
  conf . setOutputFormat ( NullOutputFormat . class ) ;
  conf . setMapOutputKeyClass ( IntWritable . class ) ;
  conf . setMapOutputValueClass ( Text . class ) ;
  conf . set ( "hadoop.job.history.user.location" , "none" ) ;
  // Speculative copies would write duplicate part data.
  conf . setSpeculativeExecution ( false ) ;
  if ( append ) {
    // Continue part numbering after the existing parts.
    int partId = findFirstAvailablePartId ( outputPath ) ;
    conf . setInt ( PART_ID_OFFSET , partId ) ;
    // Move the current index aside so it can be converted and merged with
    // entries for the newly archived files.
    Path index = new Path ( outputPath , HarFileSystem . INDEX_NAME ) ;
    Path indexDirectory = new Path ( outputPath , HarFileSystem . INDEX_NAME + ".copy" ) ;
    outFs . mkdirs ( indexDirectory ) ;
    Path indexCopy = new Path ( indexDirectory , "data" ) ;
    outFs . rename ( index , indexCopy ) ;
    MultipleInputs . addInputPath ( conf , jobDirectory , HArchiveInputFormat . class , HArchivesMapper . class ) ;
    MultipleInputs . addInputPath ( conf , indexDirectory , TextInputFormat . class , HArchivesConvertingMapper . class ) ;
    conf . setReducerClass ( HArchivesMergingReducer . class ) ;
  } else {
    conf . setMapperClass ( HArchivesMapper . class ) ;
    conf . setInputFormat ( HArchiveInputFormat . class ) ;
    FileInputFormat . addInputPath ( conf , jobDirectory ) ;
    conf . setReducerClass ( HArchivesReducer . class ) ;
  }
  JobClient . runJob ( conf ) ;
  cleanJobDirectory ( ) ;
}
archive the given source paths into the dest
32,663
/**
 * Writes one master-index line describing a hash range and the byte span it
 * occupies in _index: "startHash endHash indexStartPos indexEndPos\n".
 *
 * @param stream        master index output stream
 * @param startHash     first hash covered by this _index span
 * @param endHash       last hash covered by this _index span
 * @param indexStartPos byte offset of the span start in _index
 * @param indexEndPos   byte offset of the span end in _index
 * @throws IOException on write error
 */
private static void writeLineToMasterIndex ( FSDataOutputStream stream , long startHash , long endHash , long indexStartPos , long indexEndPos ) throws IOException {
  String toWrite = startHash + " " + endHash + " " + indexStartPos + " " + indexEndPos + "\n" ;
  // Explicit charset: the bare getBytes() used the platform default encoding,
  // which is not portable. The content is ASCII, so UTF-8 bytes are identical.
  stream . write ( toWrite . getBytes ( "UTF-8" ) ) ;
}
Writes data corresponding to part of _index to master index
32,664
/**
 * Creates a new part-file output stream ("part-<id>") under the destination
 * for writing actual archived file data.
 *
 * @param dst    archive directory
 * @param partId numeric suffix of the new part file
 * @return stream to the freshly created part file (no overwrite)
 * @throws IOException if the file already exists or creation fails
 */
private FSDataOutputStream createNewPartStream ( Path dst , int partId ) throws IOException {
  Path output = new Path ( dst , PART_PREFIX + partId ) ;
  FileSystem destFs = output . getFileSystem ( conf ) ;
  final boolean overwrite = false ;
  final int bufferSize = conf . getInt ( "io.file.buffer.size" , 4096 ) ;
  final long blockSize = conf . getLong ( HAR_BLOCKSIZE_LABEL , HAR_BLOCKSIZE_DEFAULT ) ;
  return destFs . create ( output , overwrite , bufferSize , destFs . getDefaultReplication ( ) , blockSize ) ;
}
Creates new stream to write actual file data
32,665
/**
 * Display a prompt to the user and read a line from stdin until it matches
 * one of the accepted choices (case-insensitive). The matched canonical
 * choice is returned.
 *
 * @param prompt      text logged before each read
 * @param firstChoice first accepted answer (checked before the rest)
 * @param choices     additional accepted answers
 * @return the accepted choice, in its canonical spelling
 * @throws IOException on read error, or at end of input with no valid answer
 *         (previously this case spun forever re-reading EOF)
 */
public static String ask ( String prompt , String firstChoice , String ... choices ) throws IOException {
  while ( true ) {
    LOG . info ( prompt ) ;
    StringBuilder responseBuilder = new StringBuilder ( ) ;
    boolean eof = false ;
    while ( true ) {
      int c = System . in . read ( ) ;
      if ( c == - 1 ) {
        eof = true ;
        break ;
      }
      if ( c == '\r' || c == '\n' ) {
        break ;
      }
      responseBuilder . append ( ( char ) c ) ;
    }
    String response = responseBuilder . toString ( ) ;
    if ( response . equalsIgnoreCase ( firstChoice ) )
      return firstChoice ;
    for ( String c : choices ) {
      if ( response . equalsIgnoreCase ( c ) ) {
        return c ;
      }
    }
    if ( eof ) {
      // Fail fast instead of looping forever once stdin is exhausted.
      throw new IOException ( "Reached end of input without a valid response" ) ;
    }
    LOG . error ( "I'm sorry, I cannot understand your response.\n" ) ;
  }
}
Display a prompt to the user and get his or her choice .
32,666
/**
 * The primary public method that should be called to run a simulated task.
 *
 * Map tasks and the various cleanup/setup tasks are queued immediately for
 * finishing; a reduce task instead gets a MapperWaitThread that waits for its
 * mappers before it is queued.
 *
 * @param tip the task to launch
 * @throws IOException if obtaining the umbilical protocol fails
 */
public void launchTask ( TaskInProgress tip ) throws IOException {
  LOG . info ( "Launching simulated task " + tip . getTask ( ) . getTaskID ( ) + " for job " + tip . getTask ( ) . getJobID ( ) ) ;
  TaskUmbilicalProtocol umbilicalProtocol = taskTracker . getUmbilical ( tip ) ;
  if ( tip . getTask ( ) . isMapTask ( ) || tip . getTask ( ) . isTaskCleanupTask ( ) || tip . getTask ( ) . isJobCleanupTask ( ) || tip . getTask ( ) . isJobSetupTask ( ) ) {
    // These finish on their own schedule — queue directly.
    addTipToFinish ( tip , umbilicalProtocol ) ;
  } else {
    // Reduce task: wait for its mappers first, tracked per-tip.
    MapperWaitThread mwt = new MapperWaitThread ( tip , this , umbilicalProtocol ) ;
    mapperWaitThreadMap . put ( tip , mwt ) ;
    mwt . start ( ) ;
  }
}
The primary public method that should be called to run a task . Handles both map and reduce tasks and marks them as completed after the configured time interval
32,667
/**
 * Adds the given TaskInProgress to the queue of tasks to finish, with a
 * finish time chosen uniformly at random within the configured window, then
 * interrupts the finisher thread so it re-evaluates its sleep.
 *
 * @param tip               task to schedule for completion
 * @param umbilicalProtocol umbilical used when marking the task done
 */
protected void addTipToFinish ( TaskInProgress tip , TaskUmbilicalProtocol umbilicalProtocol ) {
  final long now = System . currentTimeMillis ( ) ;
  final long delay = Math . abs ( rand . nextLong ( ) ) % timeToFinishTask ;
  final long finishTime = now + delay ;
  LOG . info ( "Adding TIP " + tip . getTask ( ) . getTaskID ( )
      + " to finishing queue with start time " + now
      + " and finish time " + finishTime
      + " (" + ( ( finishTime - now ) / 1000.0 ) + " sec) to thread " + getName ( ) ) ;
  tipQueue . put ( new TipToFinish ( tip , finishTime , umbilicalProtocol ) ) ;
  // Wake the finisher so it notices the (possibly earlier) new deadline.
  this . interrupt ( ) ;
}
Add the specified TaskInProgress to the priority queue of tasks to finish .
32,668
/**
 * Continuously takes TIPs from the queue and marks them finished at their
 * scheduled time, sleeping in between. An interrupt while sleeping puts the
 * TIP back and loops, so newly queued (possibly earlier) deadlines are
 * re-evaluated. Also enforces the mapper-wait-thread bookkeeping invariants:
 * reduce tasks must have one, all other task kinds must not.
 */
public void run ( ) {
  while ( true ) {
    TipToFinish ttf = null ;
    try {
      LOG . debug ( "Waiting for a TIP" ) ;
      ttf = tipQueue . take ( ) ;
    } catch ( InterruptedException e ) {
      // Interrupt during take() just means "look again".
      LOG . info ( "Got interrupted exception while waiting to take()" ) ;
      continue ;
    }
    LOG . debug ( " Got a TIP " + ttf . getTip ( ) . getTask ( ) . getTaskID ( ) + " at time " + System . currentTimeMillis ( ) + " with finish time " + ttf . getTimeToFinish ( ) ) ;
    boolean interrupted = false ;
    // Sleep until the TIP's finish time, unless interrupted.
    while ( true ) {
      long currentTime = System . currentTimeMillis ( ) ;
      if ( currentTime < ttf . getTimeToFinish ( ) ) {
        try {
          long sleepTime = ttf . getTimeToFinish ( ) - currentTime ;
          LOG . debug ( "Sleeping for " + sleepTime + " ms" ) ;
          Thread . sleep ( sleepTime ) ;
        } catch ( InterruptedException e ) {
          LOG . debug ( "Finisher thread was interrupted" , e ) ;
          interrupted = true ;
          break ;
        }
      } else {
        break ;
      }
    }
    if ( interrupted ) {
      // A new TIP arrived: requeue this one and re-pick the earliest deadline.
      LOG . info ( "Putting back TIP " + ttf . getTip ( ) . getTask ( ) . getTaskID ( ) + " for job " + ttf . getTip ( ) . getTask ( ) . getJobID ( ) ) ;
      tipQueue . put ( ttf ) ;
      continue ;
    }
    TaskInProgress tip = ttf . getTip ( ) ;
    ttf . finishTip ( ) ;
    // Bookkeeping: reduce tasks carry a mapper wait thread; others must not.
    if ( ! tip . getTask ( ) . isMapTask ( ) && ! tip . getTask ( ) . isTaskCleanupTask ( ) && ! tip . getTask ( ) . isJobCleanupTask ( ) && ! tip . getTask ( ) . isJobSetupTask ( ) ) {
      if ( ! mapperWaitThreadMap . containsKey ( tip ) ) {
        throw new RuntimeException ( "Unable to find mapper wait thread for " + tip . getTask ( ) . getTaskID ( ) + " job " + tip . getTask ( ) . getJobID ( ) ) ;
      }
      LOG . debug ( "Removing mapper wait thread for " + tip . getTask ( ) . getTaskID ( ) + " job " + tip . getTask ( ) . getJobID ( ) ) ;
      mapperWaitThreadMap . remove ( tip ) ;
    } else if ( mapperWaitThreadMap . containsKey ( tip ) ) {
      throw new RuntimeException ( "Mapper wait thread exists for" + tip . getTask ( ) . getTaskID ( ) + " job " + tip . getTask ( ) . getJobID ( ) + " when it shouldn't!" ) ;
    }
  }
}
Continuously looks through the queue of TIPs to mark as finished, finishing and sleeping as necessary. Can be interrupted while it is sleeping if it needs to re-evaluate how long to sleep.
32,669
public void cancel ( TaskInProgress tip ) { LOG . info ( "Canceling task " + tip . getTask ( ) . getTaskID ( ) + " of job " + tip . getTask ( ) . getJobID ( ) ) ; if ( ! tip . getTask ( ) . isMapTask ( ) && ! tip . getTask ( ) . isTaskCleanupTask ( ) ) { if ( ! mapperWaitThreadMap . containsKey ( tip ) ) { throw new RuntimeException ( "Mapper wait thread doesn't exist " + "for " + tip . getTask ( ) . getTaskID ( ) ) ; } LOG . debug ( "Interrupting mapper wait thread for " + tip . getTask ( ) . getTaskID ( ) + " job " + tip . getTask ( ) . getJobID ( ) ) ; mapperWaitThreadMap . get ( tip ) . interrupt ( ) ; LOG . debug ( "Removing mapper wait thread for " + tip . getTask ( ) . getTaskID ( ) + " job " + tip . getTask ( ) . getJobID ( ) ) ; mapperWaitThreadMap . remove ( tip ) ; } else { LOG . debug ( tip . getTask ( ) . getTaskID ( ) + " is not a reduce task, so " + "not canceling mapper wait thread" ) ; } removeFromFinishingQueue ( tip ) ; }
Called in case the task needs to be killed . Canceling will kill any map wait threads and also remove it from the queue of tasks that should be marked as finished .
32,670
/**
 * Parses one line of the Hadoop log. If the line matches the expected
 * "date time LEVEL source: message" layout, a populated EventRecord is
 * returned; otherwise an empty EventRecord (or null for null input).
 *
 * NOTE(review): the Pattern is rebuilt and recompiled on every call; since
 * dateformat/timeformat are instance fields this could be cached per
 * instance — TODO confirm those fields are immutable after construction.
 *
 * @param line raw log line, may be null
 * @return parsed record, empty record on mismatch, or null for null input
 * @throws IOException declared for interface compatibility
 */
public EventRecord parseLine ( String line ) throws IOException {
  EventRecord retval = null ;
  if ( line != null ) {
    // Groups: 1=date, 2=time, 3=level, 4=source class, 5=message.
    String patternStr = "(" + dateformat + ")" ;
    patternStr += "\\s+" ;
    patternStr += "(" + timeformat + ")" ;
    patternStr += ".{4}\\s(\\w*)\\s" ;
    patternStr += "\\s*([\\w+\\.?]+)" ;
    patternStr += ":\\s+(.+)" ;
    Pattern pattern = Pattern . compile ( patternStr ) ;
    Matcher matcher = pattern . matcher ( line ) ;
    if ( matcher . find ( 0 ) && matcher . groupCount ( ) >= 5 ) {
      retval = new EventRecord ( hostname , ips , parseDate ( matcher . group ( 1 ) , matcher . group ( 2 ) ) , "HadoopLog" , matcher . group ( 3 ) , matcher . group ( 4 ) , matcher . group ( 5 ) ) ;
    } else {
      retval = new EventRecord ( ) ;
    }
  }
  return retval ;
}
Parses one line of the log . If the line contains a valid log entry then an appropriate EventRecord is returned after all relevant fields have been parsed .
32,671
/**
 * Parse a date ("yyyy-MM-dd") and time ("HH:mm:ss") found in the Hadoop log
 * into a Calendar.
 *
 * Fixes: Calendar.MONTH is zero-based (JANUARY == 0) while the log uses
 * 1-based months, so the parsed month must be decremented; previously every
 * date landed one month late. Milliseconds are also zeroed so two parses of
 * the same timestamp compare equal (they used to inherit the current-time
 * millis from Calendar.getInstance()).
 *
 * @param strDate date portion, dash-separated year-month-day
 * @param strTime time portion, colon-separated hour:minute:second
 * @return calendar set to the parsed instant
 */
protected Calendar parseDate ( String strDate , String strTime ) {
  Calendar retval = Calendar . getInstance ( ) ;
  String [ ] fields = strDate . split ( "-" ) ;
  retval . set ( Calendar . YEAR , Integer . parseInt ( fields [ 0 ] ) ) ;
  // Calendar months are zero-based; log months are 1-based.
  retval . set ( Calendar . MONTH , Integer . parseInt ( fields [ 1 ] ) - 1 ) ;
  retval . set ( Calendar . DATE , Integer . parseInt ( fields [ 2 ] ) ) ;
  fields = strTime . split ( ":" ) ;
  retval . set ( Calendar . HOUR_OF_DAY , Integer . parseInt ( fields [ 0 ] ) ) ;
  retval . set ( Calendar . MINUTE , Integer . parseInt ( fields [ 1 ] ) ) ;
  retval . set ( Calendar . SECOND , Integer . parseInt ( fields [ 2 ] ) ) ;
  // Make the result deterministic.
  retval . set ( Calendar . MILLISECOND , 0 ) ;
  return retval ;
}
Parse a date found in the Hadoop log .
32,672
/**
 * Attempt to determine the hostname of the node that created the log file by
 * grepping the STARTUP_MSG header line, which contains a "host/ip" token.
 * Leaves hostname/ips untouched if no such token is found.
 */
private void findHostname ( ) {
  String startupInfo = Environment . runCommand (
      "grep --max-count=1 STARTUP_MSG:\\s*host " + file . getName ( ) ) . toString ( ) ;
  // Group 1 captures the "host/ip" token (the regex guarantees the slash).
  Matcher matcher = Pattern . compile ( "\\s+(\\w+/.+)\\s+" ) . matcher ( startupInfo ) ;
  if ( matcher . find ( 0 ) ) {
    String [ ] hostAndIp = matcher . group ( 1 ) . split ( "/" ) ;
    hostname = hostAndIp [ 0 ] ;
    ips = new String [ ] { hostAndIp [ 1 ] } ;
  }
}
Attempt to determine the hostname of the node that created the log file . This information can be found in the STARTUP_MSG lines of the Hadoop log which are emitted when the node starts .
32,673
/**
 * Mark the block belonging to the given datanode as corrupt, creating the
 * node set for the block on first use.
 *
 * @param blk block reported corrupt
 * @param dn  datanode holding the corrupt replica
 * @return true if the node was newly recorded, false if already present
 */
public boolean addToCorruptReplicasMap ( Block blk , DatanodeDescriptor dn ) {
  Collection < DatanodeDescriptor > nodes = getNodes ( blk ) ;
  if ( nodes == null ) {
    nodes = new TreeSet < DatanodeDescriptor > ( ) ;
    corruptReplicasMap . put ( blk , nodes ) ;
  }
  if ( nodes . contains ( dn ) ) {
    // Duplicate report — log and leave the map unchanged.
    NameNode . stateChangeLog . info ( "BLOCK NameSystem.addToCorruptReplicasMap: " + "duplicate requested for " + blk . getBlockName ( ) + " to add as corrupt " + "on " + dn . getName ( ) + " by " + Server . getRemoteIp ( ) ) ;
    return false ;
  }
  boolean added = nodes . add ( dn ) ;
  NameNode . stateChangeLog . info ( "BLOCK NameSystem.addToCorruptReplicasMap: " + blk . getBlockName ( ) + " added as corrupt on " + dn . getName ( ) + " by " + Server . getRemoteIp ( ) ) ;
  return added ;
}
Mark the block belonging to datanode as corrupt .
32,674
/**
 * Remove the given datanode from the block's corrupt-replica set, dropping
 * the block entry entirely once its set becomes empty.
 *
 * @param blk      block whose corrupt replica is being removed
 * @param datanode datanode to remove
 * @return true if the datanode was present and removed
 */
boolean removeFromCorruptReplicasMap ( Block blk , DatanodeDescriptor datanode ) {
  Collection < DatanodeDescriptor > datanodes = corruptReplicasMap . get ( blk ) ;
  if ( datanodes == null || ! datanodes . remove ( datanode ) ) {
    return false ;
  }
  // Last corrupt replica gone: forget the block.
  if ( datanodes . isEmpty ( ) ) {
    corruptReplicasMap . remove ( blk ) ;
  }
  return true ;
}
Remove the block at the given datanode from CorruptBlockMap
32,675
/**
 * Get the datanodes holding corrupt replicas of the given block, or null if
 * none are recorded.
 *
 * The old explicit size()==0 special case was redundant: get() on an empty
 * map already returns null.
 *
 * @param blk block to look up
 * @return corrupt-replica holders, or null when unknown
 */
Collection < DatanodeDescriptor > getNodes ( Block blk ) {
  return corruptReplicasMap . get ( blk ) ;
}
Get Nodes which have corrupt replicas of Block
32,676
/**
 * Check whether the replica of the given block on the given datanode is
 * recorded as corrupt.
 *
 * @param blk  block to check
 * @param node datanode holding the replica
 * @return true if the replica is known corrupt
 */
boolean isReplicaCorrupt ( Block blk , DatanodeDescriptor node ) {
  Collection < DatanodeDescriptor > nodes = getNodes ( blk ) ;
  if ( nodes == null ) {
    return false ;
  }
  return nodes . contains ( node ) ;
}
Check if replica belonging to Datanode is corrupt
32,677
/**
 * Calculate a method's fingerprint from its name, return type and parameter
 * types: name.hashCode() + 31 * returnTypeName.hashCode(), then folded with
 * 31*h ^ paramTypeName.hashCode() for each parameter in order.
 *
 * @param method method to fingerprint
 * @return fingerprint hash
 */
static int getFingerprint ( Method method ) {
  int hash = method . getName ( ) . hashCode ( )
      + 31 * method . getReturnType ( ) . getName ( ) . hashCode ( ) ;
  for ( Class < ? > paramType : method . getParameterTypes ( ) ) {
    hash = 31 * hash ^ paramType . getName ( ) . hashCode ( ) ;
  }
  return hash ;
}
Calculate a method's hash code considering its method name, return type and its parameter types.
32,678
/**
 * Convert an array of Methods into the parallel array of their fingerprint
 * hash codes.
 *
 * @param methods methods to fingerprint; may be null
 * @return fingerprints in the same order, or null for null input
 */
private static int [ ] getFingerprints ( Method [ ] methods ) {
  if ( methods == null ) {
    return null ;
  }
  int [ ] fingerprints = new int [ methods . length ] ;
  int i = 0 ;
  for ( Method m : methods ) {
    fingerprints [ i ++ ] = getFingerprint ( m ) ;
  }
  return fingerprints ;
}
Convert an array of Method into an array of hash codes
32,679
/**
 * Return a protocol's signature and fingerprint from the cache, computing and
 * caching it on first request. All cache access happens under the cache's
 * monitor, so concurrent callers see a single computed entry per protocol.
 *
 * @param protocol      protocol class whose methods are fingerprinted
 * @param serverVersion server version embedded in the signature
 * @return cached or freshly computed signature fingerprint
 */
private static ProtocolSigFingerprint getSigFingerprint ( Class < ? extends VersionedProtocol > protocol , long serverVersion ) {
  String protocolName = protocol . getName ( ) ;
  synchronized ( PROTOCOL_FINGERPRINT_CACHE ) {
    ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE . get ( protocolName ) ;
    if ( sig == null ) {
      // Cache miss: fingerprint every public method of the protocol.
      int [ ] serverMethodHashcodes = getFingerprints ( protocol . getMethods ( ) ) ;
      sig = new ProtocolSigFingerprint ( new ProtocolSignature ( serverVersion , serverMethodHashcodes ) , getFingerprint ( serverMethodHashcodes ) ) ;
      PROTOCOL_FINGERPRINT_CACHE . put ( protocolName , sig ) ;
    }
    return sig ;
  }
}
Return a protocol s signature and finger print from cache
32,680
/**
 * Increases target's compute specs (cpus, memory, disk) by incr, in place.
 *
 * @param target specs to mutate
 * @param incr   amounts to add
 */
public static void incrComputeSpecs ( ComputeSpecs target , ComputeSpecs incr ) {
  target . numCpus += incr . numCpus ;
  target . memoryMB += incr . memoryMB ;
  target . diskGB += incr . diskGB ;
}
Increase the compute specs
32,681
/**
 * Decreases target's compute specs (cpus, memory, disk) by decr, in place.
 *
 * @param target specs to mutate
 * @param decr   amounts to subtract
 */
public static void decrComputeSpecs ( ComputeSpecs target , ComputeSpecs decr ) {
  target . numCpus -= decr . numCpus ;
  target . memoryMB -= decr . memoryMB ;
  target . diskGB -= decr . diskGB ;
}
Decrease the compute specs by decr
32,682
/**
 * A reliable way to wait for a thread's termination: repeatedly interrupts
 * the target and joins until it dies. Accepts null (no-op).
 *
 * Fix: if our own join() is interrupted we now remember that and restore the
 * calling thread's interrupt status on exit instead of silently swallowing
 * it, so callers can still observe their own interruption.
 *
 * @param thread thread to terminate and wait for; may be null
 */
public static void waitThreadTermination ( Thread thread ) {
  boolean selfInterrupted = false ;
  while ( thread != null && thread . isAlive ( ) ) {
    thread . interrupt ( ) ;
    try {
      thread . join ( ) ;
    } catch ( InterruptedException e ) {
      // Keep looping until the target dies; note our own interruption.
      selfInterrupted = true ;
    }
  }
  if ( selfInterrupted ) {
    Thread . currentThread ( ) . interrupt ( ) ;
  }
}
A reliable way to wait for thread termination.
32,683
/**
 * Convert the appinfo string into the address the application is available on.
 *
 * @param info the appinfo string; expected to match INET_ADDRESS_PATTERN with
 *        group 1 = host and group 2 = port
 * @return the parsed address, or null if the string does not match
 */
public static InetAddress appInfoToAddress(String info) {
  final Matcher matcher = INET_ADDRESS_PATTERN.matcher(info);
  if (!matcher.find()) {
    return null;
  }
  final String host = matcher.group(1);
  final int port = Integer.parseInt(matcher.group(2));
  return new InetAddress(host, port);
}
Convert the appinfo string to the address the application is available on
32,684
/**
 * Install a default uncaught-exception handler that logs the failure and
 * makes the process exit with code 1 whenever any thread dies from an
 * uncaught exception.
 *
 * @param log the log to record the uncaught exception to
 */
public static void makeProcessExitOnUncaughtException(final Log log) {
  final Thread.UncaughtExceptionHandler exitHandler =
      new Thread.UncaughtExceptionHandler() {
        public void uncaughtException(Thread thread, Throwable throwable) {
          log.error("UNCAUGHT: Thread " + thread.getName()
              + " got an uncaught exception", throwable);
          System.exit(1);
        }
      };
  Thread.setDefaultUncaughtExceptionHandler(exitHandler);
}
Sets an uncaught exception handler . This will make the process exit with exit code 1 if a thread exits due to an uncaught exception .
32,685
/**
 * Re-interrupt the current thread, log the error, and re-throw the
 * InterruptedException wrapped as an IOException.
 *
 * @param msg message to log and to use in the thrown exception
 * @param e the interruption being converted
 * @throws IOException always, wrapping {@code e}
 */
public static void interruptedException(String msg, InterruptedException e)
    throws IOException {
  // Preserve the interrupt status for callers further up the stack.
  Thread.currentThread().interrupt();
  final IOException wrapped = new IOException(msg, e);
  LOG.error(msg, e);
  throw wrapped;
}
Interrupt the thread and then log and re - throw an InterruptedException as an IOException .
32,686
/**
 * Obtain the largest block size among the given located blocks.
 *
 * @param lbs the located blocks to inspect
 * @return the maximum block size found, or -1 if there are no blocks
 * @throws IOException declared for callers; not thrown directly here
 */
private static long getBlockSize(LocatedBlocks lbs) throws IOException {
  long maxSize = -1;
  for (LocatedBlock block : lbs.getLocatedBlocks()) {
    maxSize = Math.max(maxSize, block.getBlockSize());
  }
  return maxSize;
}
Obtain the largest block size among the given located blocks.
32,687
/**
 * Undelete the source file together with its parity files for every
 * configured codec. The parity undeletes are best-effort: their return
 * values are ignored.
 *
 * @param f the source path to undelete
 * @param userName the user performing the undelete
 * @return true if the source file was undeleted, false otherwise
 * @throws IOException on filesystem errors
 */
public boolean undelete(Path f, String userName) throws IOException {
  final List<Codec> codecs = Codec.getCodecs();
  final int numCodecs = codecs.size();
  final Path[] parityPaths = new Path[numCodecs];
  for (int i = 0; i < numCodecs; i++) {
    parityPaths[i] = new Path(codecs.get(i).parityDirectory, makeRelative(f));
  }
  // Only restore parity if the source itself came back.
  if (!fs.undelete(f, userName)) {
    return false;
  }
  for (Path parityPath : parityPaths) {
    fs.undelete(parityPath, userName);
  }
  return true;
}
Undelete the parity files together with the src file.
32,688
/**
 * Recursively search a directory tree for Har'ed parity files.
 *
 * @param stat the file status of the directory to search from
 * @return true if any HAR archive is found under this directory
 * @throws IOException on filesystem errors
 */
private boolean searchHarDir(FileStatus stat) throws IOException {
  if (!stat.isDir()) {
    return false;
  }
  // Fast path: look for a HAR archive directly under this directory.
  String pattern = stat.getPath().toString() + "/*" + RaidNode.HAR_SUFFIX + "*";
  FileStatus[] stats = globStatus(new Path(pattern));
  if (stats != null && stats.length > 0) {
    return true;
  }
  // Recurse into children. listStatus may return null (e.g. if the
  // directory vanished between the isDir() check and this call); the
  // original code would NPE on the for-loop in that case.
  stats = fs.listStatus(stat.getPath());
  if (stats == null) {
    return false;
  }
  for (FileStatus status : stats) {
    if (searchHarDir(status)) {
      return true;
    }
  }
  return false;
}
Search for the Har'ed (archived) parity files.
32,689
/**
 * Return the i-th of the hardlinked files, or null if no such entry exists.
 *
 * @param i index into the linked-file list
 * @return the file at index i, or null if i is out of range
 */
public INodeHardLinkFile getHardLinkedFile(int i) {
  // Guard both bounds: the original only checked the upper bound, so a
  // negative index escaped as an IndexOutOfBoundsException instead of
  // the documented null.
  if (i >= 0 && i < this.linkedFiles.size()) {
    return this.linkedFiles.get(i);
  }
  return null;
}
Return the i-th of the hardlinked files.
32,690
/**
 * Remove an INodeHardLinkFile from the linkedFiles.
 * This function is not thread-safe. The caller is supposed to hold a
 * writeLock.
 *
 * @param file the hardlinked file to remove from this link group
 */
public void removeLinkedFile ( INodeHardLinkFile file ) {
  // Remove by identity (==), not equals(): we want this exact INode instance.
  for ( int i = 0 ; i < linkedFiles . size ( ) ; i ++ ) {
    if ( linkedFiles . get ( i ) == file ) {
      linkedFiles . remove ( i ) ;
      break ;
    }
  }
  INodeFile newOwner = null ;
  if ( linkedFiles . size ( ) == 1 ) {
    // Only one reference remains: demote it back to a plain INodeFile,
    // replace it in its parent directory, and make it the blocks' owner.
    INodeHardLinkFile lastReferencedFile = linkedFiles . get ( 0 ) ;
    INodeFile inodeFile = new INodeFile ( lastReferencedFile ) ;
    lastReferencedFile . parent . replaceChild ( inodeFile ) ;
    linkedFiles . clear ( ) ;
    newOwner = inodeFile ;
  } else {
    // More than one reference remains. If the removed file was the blocks'
    // current owner (checked via the first block's INode pointer), hand
    // ownership to another surviving linked file.
    if ( file . getBlocks ( ) != null && file . getBlocks ( ) . length > 0 && ( file . getBlocks ( ) [ 0 ] . getINode ( ) == file ) ) {
      newOwner = linkedFiles . get ( 0 ) ;
    }
  }
  if ( newOwner != null ) {
    // Re-point every block's INode reference at the new owner.
    // NOTE(review): assumes file.getBlocks() is non-null here when newOwner
    // came from the size==1 branch — TODO confirm.
    for ( BlockInfo blkInfo : file . getBlocks ( ) ) {
      blkInfo . setINode ( newOwner ) ;
    }
  }
}
Remove an INodeHardLinkFile from the linkedFiles . This function is not thread - safe . The caller is supposed to have a writeLock .
32,691
/**
 * Set the PermissionStatus on every file sharing this hardlink.
 * The 'false' flag presumably suppresses further propagation — TODO confirm.
 *
 * @param ps the permission status to apply
 */
protected void setPermissionStatus(PermissionStatus ps) {
  final int count = linkedFiles.size();
  for (int i = 0; i < count; i++) {
    linkedFiles.get(i).setPermissionStatus(ps, false);
  }
}
Set the PermissionStatus for all the linked files.
32,692
/**
 * Set the user name on every file sharing this hardlink.
 * The 'false' flag presumably suppresses further propagation — TODO confirm.
 *
 * @param user the user name to apply
 */
protected void setUser(String user) {
  final int count = linkedFiles.size();
  for (int i = 0; i < count; i++) {
    linkedFiles.get(i).setUser(user, false);
  }
}
Set the user name for all the linked files
32,693
/**
 * Set the group name on every file sharing this hardlink.
 * The 'false' flag presumably suppresses further propagation — TODO confirm.
 *
 * @param group the group name to apply
 */
protected void setGroup(String group) {
  final int count = linkedFiles.size();
  for (int i = 0; i < count; i++) {
    linkedFiles.get(i).setGroup(group, false);
  }
}
Set the group name for all the linked files
32,694
/**
 * Set the permission on every file sharing this hardlink.
 * The 'false' flag presumably suppresses further propagation — TODO confirm.
 *
 * @param permission the permission to apply
 */
protected void setPermission(FsPermission permission) {
  final int count = linkedFiles.size();
  for (int i = 0; i < count; i++) {
    linkedFiles.get(i).setPermission(permission, false);
  }
}
Set the permission for all the linked files
32,695
/**
 * Write a number of files from a ram directory to a data output.
 * Format: an int file count, then for each file its name (Text), its length
 * (long), and the raw bytes for non-empty files.
 *
 * @param out destination stream
 * @param dir source ram directory
 * @param names the file names to serialize
 * @throws IOException on read/write errors
 */
public static void writeRAMFiles(DataOutput out, RAMDirectory dir, String[] names)
    throws IOException {
  out.writeInt(names.length);
  for (String name : names) {
    Text.writeString(out, name);
    final long length = dir.fileLength(name);
    out.writeLong(length);
    if (length <= 0) {
      continue;
    }
    IndexInput input = null;
    try {
      input = dir.openInput(name, BUFFER_SIZE);
      final byte[] buffer = new byte[BUFFER_SIZE];
      long remaining = length;
      while (remaining > 0) {
        final int len = remaining >= BUFFER_SIZE ? BUFFER_SIZE : (int) remaining;
        input.readBytes(buffer, 0, len);
        out.write(buffer, 0, len);
        remaining -= len;
      }
    } finally {
      if (input != null) {
        input.close();
      }
    }
  }
}
Write a number of files from a ram directory to a data output .
32,696
/**
 * Read a number of files from a data input into a ram directory.
 * Expects the format produced by writeRAMFiles: an int file count, then for
 * each file its name (Text), its length (long), and the raw bytes for
 * non-empty files.
 *
 * @param in source stream
 * @param dir destination ram directory
 * @throws IOException on read/write errors
 */
public static void readRAMFiles(DataInput in, RAMDirectory dir) throws IOException {
  final int numFiles = in.readInt();
  for (int i = 0; i < numFiles; i++) {
    final String name = Text.readString(in);
    final long length = in.readLong();
    if (length <= 0) {
      continue;
    }
    IndexOutput output = null;
    try {
      output = dir.createOutput(name);
      final byte[] buffer = new byte[BUFFER_SIZE];
      long remaining = length;
      while (remaining > 0) {
        final int len = remaining >= BUFFER_SIZE ? BUFFER_SIZE : (int) remaining;
        in.readFully(buffer, 0, len);
        output.writeBytes(buffer, 0, len);
        remaining -= len;
      }
    } finally {
      if (output != null) {
        output.close();
      }
    }
  }
}
Read a number of files from a data input to a ram directory .
32,697
/**
 * Asynchronous update of locality: enqueue a locality record and wake a
 * waiter draining the queue.
 *
 * @param tip the task in progress
 * @param host the host the task ran on
 * @param inputBytes the task's input bytes
 */
public void record(TaskInProgress tip, String host, long inputBytes) {
  final Record entry = new Record(tip, host, inputBytes);
  synchronized (localityRecords) {
    localityRecords.add(entry);
    localityRecords.notify();
  }
}
Asynchronous update of locality .
32,698
/**
 * Perform the statistics computation based on a locality record by
 * delegating to the field-wise overload.
 *
 * @param record the locality record to process
 */
private void computeStatistics(Record record) {
  final TaskInProgress tip = record.tip;
  final String host = record.host;
  final long inputBytes = record.inputBytes;
  computeStatistics(tip, host, inputBytes);
}
Perform the statistics computation based on a locality record.
32,699
/**
 * Perform the statistics computation for one map task: classify the task by
 * its topology distance from its input split and update job counters/stats.
 *
 * @param tip the task in progress
 * @param host the tracker host the task ran on
 * @param inputBytes map input bytes; a negative value means "update task
 *        counts only" (the task was just chosen, bytes unknown yet)
 */
private void computeStatistics ( TaskInProgress tip , String host , long inputBytes ) {
  int level = this . maxLevel ;
  String [ ] splitLocations = tip . getSplitLocations ( ) ;
  if ( splitLocations . length > 0 ) {
    // Find the minimum topology matching level between the tracker host
    // and any replica location of the split (0 = same node, 1 = same rack).
    Node tracker = topologyCache . getNode ( host ) ;
    for ( String local : splitLocations ) {
      Node datanode = topologyCache . getNode ( local ) ;
      int newLevel = this . maxLevel ;
      if ( tracker != null && datanode != null ) {
        newLevel = getMatchingLevelForNodes ( tracker , datanode , maxLevel ) ;
      }
      if ( newLevel < level ) {
        level = newLevel ;
        if ( level == 0 ) {
          // Cannot get any closer than data-local; stop scanning replicas.
          break ;
        }
      }
    }
  }
  // Negative inputBytes signals "count the task", otherwise "count the bytes".
  boolean updateTaskCountOnly = inputBytes < 0 ;
  switch ( level ) {
  case 0 :
    // Data-local: a split replica lives on the same node as the tracker.
    if ( updateTaskCountOnly ) {
      LOG . info ( "Chose data-local task " + tip . getTIPId ( ) ) ;
      jobCounters . incrCounter ( Counter . DATA_LOCAL_MAPS , 1 ) ;
      jobStats . incNumDataLocalMaps ( ) ;
    } else {
      jobCounters . incrCounter ( Counter . LOCAL_MAP_INPUT_BYTES , inputBytes ) ;
      jobStats . incLocalMapInputBytes ( inputBytes ) ;
    }
    break ;
  case 1 :
    // Rack-local: a split replica lives in the same rack as the tracker.
    if ( updateTaskCountOnly ) {
      LOG . info ( "Chose rack-local task " + tip . getTIPId ( ) ) ;
      jobCounters . incrCounter ( Counter . RACK_LOCAL_MAPS , 1 ) ;
      jobStats . incNumRackLocalMaps ( ) ;
    } else {
      jobCounters . incrCounter ( Counter . RACK_MAP_INPUT_BYTES , inputBytes ) ;
      jobStats . incRackMapInputBytes ( inputBytes ) ;
    }
    break ;
  default :
    // Neither node- nor rack-local. OTHER_LOCAL_MAPS is only bumped when the
    // level is known (level != maxLevel), and only in task-count mode.
    LOG . info ( "Chose non-local task " + tip . getTIPId ( ) + " at level " + level ) ;
    if ( updateTaskCountOnly && level != this . maxLevel ) {
      jobCounters . incrCounter ( Counter . OTHER_LOCAL_MAPS , 1 ) ;
    }
    break ;
  }
}
Perform the statistics computation.