idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,600
private void processParityHarPartBlock ( FileSystem dfs , Path partFile , long blockOffset , FileStatus partFileStat , HarIndex harIndex , File localBlockFile , Progressable progress ) throws IOException { String partName = partFile . toUri ( ) . getPath ( ) ; partName = partName . substring ( 1 + partName . lastIndexO...
This reconstructs a single part file block by recovering in sequence each parity block in the part file block .
32,601
DataInputStream computeMetadata ( Configuration conf , InputStream dataStream ) throws IOException { ByteArrayOutputStream mdOutBase = new ByteArrayOutputStream ( 1024 * 1024 ) ; DataOutputStream mdOut = new DataOutputStream ( mdOutBase ) ; mdOut . writeShort ( FSDataset . FORMAT_VERSION_NON_INLINECHECKSUM ) ; int byte...
Reads data from the data stream provided and computes metadata .
32,602
private void sendReconstructedBlock ( String datanode , final FileInputStream blockContents , final DataInputStream metadataIn , Block block , long blockSize , int dataTransferVersion , int namespaceId , Progressable progress ) throws IOException { InetSocketAddress target = NetUtils . createSocketAddr ( datanode ) ; S...
Send a generated block to a datanode .
32,603
public static UserGroupInformation login ( Configuration conf ) throws LoginException { if ( LOGIN_UGI == null ) { LOGIN_UGI = UnixUserGroupInformation . login ( conf ) ; } return LOGIN_UGI ; }
Login and return a UserGroupInformation object .
32,604
public static UserGroupInformation getUGI ( Configuration conf ) throws LoginException { UserGroupInformation ugi = null ; if ( conf . getBoolean ( UGI_SOURCE , true ) ) { ugi = UnixUserGroupInformation . readFromConf ( conf , UnixUserGroupInformation . UGI_PROPERTY_NAME ) ; } else { ugi = UserGroupInformation . getCur...
get the current user id
32,605
public < T > T doAs ( PrivilegedAction < T > action ) { return Subject . doAs ( null , action ) ; }
Run the given action as the user .
32,606
public < T > T doAs ( PrivilegedExceptionAction < T > action ) throws IOException , InterruptedException { try { return Subject . doAs ( null , action ) ; } catch ( PrivilegedActionException pae ) { Throwable cause = pae . getCause ( ) ; LOG . error ( "PriviledgedActionException as:" + this + " cause:" + cause ) ; if (...
Run the given action as the user potentially throwing an exception .
32,607
public void map ( K1 arg0 , V1 arg1 , OutputCollector < Text , Text > arg2 , Reporter arg3 ) throws IOException { throw new IOException ( "should not be called\n" ) ; }
Should not be called ; throws an IOException if invoked .
32,608
public static RaidInfo getFileRaidInfo ( final FileStatus stat , Configuration conf , boolean skipHarChecking ) throws IOException { ParityFilePair ppair = null ; for ( Codec c : Codec . getCodecs ( ) ) { ppair = ParityFilePair . getParityFile ( c , stat , conf , skipHarChecking ) ; if ( ppair != null ) { return new Ra...
returns the raid info for a given file
32,609
private static BlockLocation [ ] getParityBlocks ( final Path filePath , final long blockSize , final long numStripes , final RaidInfo raidInfo ) throws IOException { FileSystem parityFS = raidInfo . parityPair . getFileSystem ( ) ; FileStatus parityFileStatus = raidInfo . parityPair . getFileStatus ( ) ; long parityFi...
gets the parity blocks corresponding to file returns the parity blocks in case of DFS and the part blocks containing parity blocks in case of HAR FS
32,610
private static void checkParityBlocks ( final Path filePath , final Map < Integer , Integer > corruptBlocksPerStripe , final long blockSize , final long startStripeIdx , final long endStripeIdx , final long numStripes , final RaidInfo raidInfo ) throws IOException { BlockLocation [ ] containerBlocks = getParityBlocks (...
checks the parity blocks for a given file and modifies corruptBlocksPerStripe accordingly
32,611
public static Path sourcePathFromParityPath ( Path parityPath , FileSystem fs ) throws IOException { String parityPathStr = parityPath . toUri ( ) . getPath ( ) ; for ( Codec codec : Codec . getCodecs ( ) ) { String prefix = codec . getParityPrefix ( ) ; if ( parityPathStr . startsWith ( prefix ) ) { String src = parit...
returns the source file corresponding to a parity file
32,612
public static void updateChunkChecksum ( byte [ ] buf , int checksumOff , int dataOff , int dataLen , DataChecksum checksum ) throws IOException { int bytesPerChecksum = checksum . getBytesPerChecksum ( ) ; int checksumSize = checksum . getChecksumSize ( ) ; int curChecksumOff = checksumOff ; int curDataOff = dataOff ;...
updates the checksum for a buffer
32,613
public static long copyBytesAndGenerateCRC ( InputStream in , OutputStream out , int buffSize , boolean close , IOThrottler throttler ) throws IOException { PrintStream ps = out instanceof PrintStream ? ( PrintStream ) out : null ; byte buf [ ] = new byte [ buffSize ] ; Checksum sum = new NativeCrc32 ( ) ; sum . reset ...
Copies from one stream to another and generate CRC checksum .
32,614
public static void writeFully ( FileChannel fc , ByteBuffer buf , long offset ) throws IOException { do { offset += fc . write ( buf , offset ) ; } while ( buf . remaining ( ) > 0 ) ; }
Write a ByteBuffer to a FileChannel at a given offset handling short writes .
32,615
public static void cloneInto ( Writable dst , Writable src ) throws IOException { ReflectionUtils . cloneWritableInto ( dst , src ) ; }
Make a copy of the writable object using serialization to a buffer
32,616
public static String readStringSafely ( DataInput in , int maxLength ) throws IOException , IllegalArgumentException { int length = readVInt ( in ) ; if ( length < 0 || length > maxLength ) { throw new IllegalArgumentException ( "Encoded byte size for String was " + length + ", which is outside of 0.." + maxLength + " ...
Read a string but check it for sanity . The format consists of a vint followed by the given number of bytes .
32,617
protected void flushAndSync ( boolean durable ) throws IOException { if ( outputStream == null ) { throw new IOException ( "Trying to use aborted output stream!" ) ; } if ( doubleBuf . isFlushed ( ) ) { return ; } doubleBuf . flushTo ( outputStream ) ; }
Flush the contents of the ready buffer to a durable BookKeeper ledger . After this method returns we are guaranteed to have persisted the log records stored in the ready buffer to a quorum of bookies that store the underlying ledger .
32,618
public void waitForRestart ( ) { if ( standbyThread != null ) { try { standbyThread . join ( ) ; } catch ( InterruptedException ie ) { } standbyThread = null ; LOG . info ( "waitForRestart: Standby thread exited." ) ; InjectionHandler . processEvent ( InjectionEvent . AVATARNODE_WAIT_FOR_RESTART ) ; while ( failoverSta...
Wait for the StandbyNode to exit . If it does then stop the underlying namenode .
32,619
public long getProtocolVersion ( String protocol , long clientVersion ) throws IOException { if ( protocol . equals ( AvatarProtocol . class . getName ( ) ) ) { return AvatarProtocol . versionID ; } else { return super . getProtocolVersion ( protocol , clientVersion ) ; } }
If the specified protocol is AvatarProtocol then return the AvatarProtocol version id otherwise delegate to the underlying namenode .
32,620
private void verifyEditStreams ( ) throws IOException { if ( getFSImage ( ) . getEditLog ( ) . isSharedJournalAvailable ( ) && InjectionHandler . trueCondition ( InjectionEvent . AVATARNODE_CHECKEDITSTREAMS ) ) { return ; } int expectedEditStreams = NNStorageConfiguration . getNamespaceEditsDirs ( confg ) . size ( ) ; ...
Return true if the shared journal is active or if the number of active journals is equal to the number of configured journals . Throw IOException otherwise .
32,621
public synchronized void shutdown ( boolean synchronous ) throws IOException { LOG . info ( "Failover: Asynchronous shutdown for: " + currentAvatar ) ; super . namesystem . checkSuperuserPrivilege ( ) ; if ( runInfo . shutdown ) { LOG . info ( "Failover: Node already shut down" ) ; return ; } verifyEditStreams ( ) ; ru...
Shuts down the avatar node
32,622
protected void stopRPC ( boolean interruptClientHandlers ) throws IOException { try { stopRPCInternal ( server , "avatardatanode" , interruptClientHandlers ) ; super . stopRPC ( interruptClientHandlers ) ; stopWaitRPCInternal ( server , "avatardatanode" ) ; } catch ( InterruptedException ex ) { throw new IOException ( ...
Stops all RPC threads and ensures that all RPC handlers have exited . Stops all communication to the namenode .
32,623
private static void printUsage ( ) { System . err . println ( "Usage: java AvatarNode [" + StartupOption . STANDBY . getName ( ) + "] | [" + StartupOption . NODEZERO . getName ( ) + "] | [" + StartupOption . NODEONE . getName ( ) + "] | [" + StartupOption . FORMAT . getName ( ) + "] | [" + StartupOption . UPGRADE . get...
Help message for a user
32,624
static void validateStartupOptions ( StartupInfo startInfo ) throws IOException { if ( startInfo . isStandby ) { if ( startInfo . startOpt == StartupOption . FORMAT || startInfo . startOpt == StartupOption . FINALIZE || startInfo . startOpt == StartupOption . ROLLBACK || startInfo . startOpt == StartupOption . UPGRADE ...
validates command line arguments
32,625
private static StartupInfo parseArguments ( String args [ ] ) { InstanceId instance = InstanceId . NODEZERO ; StartupOption startOpt = StartupOption . REGULAR ; boolean isStandby = false ; String serviceName = null ; boolean force = false ; int argsLen = ( args == null ) ? 0 : args . length ; for ( int i = 0 ; i < args...
Analyze the command line options
32,626
public static void initializeGenericKeys ( Configuration conf , String serviceKey ) { if ( ( serviceKey == null ) || serviceKey . isEmpty ( ) ) { return ; } NameNode . initializeGenericKeys ( conf , serviceKey ) ; DFSUtil . setGenericConf ( conf , serviceKey , AVATARSERVICE_SPECIFIC_KEYS ) ; adjustMetaDirectoryNames ( ...
In federation , configuration is set for a set of avatarnodes , namenodes etc. which are grouped under a logical nameservice ID . The configuration keys specific to them have a suffix set to the configured nameserviceId .
32,627
public static void adjustMetaDirectoryNames ( Configuration conf , String serviceKey ) { adjustMetaDirectoryName ( conf , DFS_SHARED_NAME_DIR0_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_SHARED_NAME_DIR1_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_SHARED_EDITS_DIR0_KEY , serviceKey ) ; adjus...
Append service name to each avatar meta directory name
32,628
private static void isPrimaryAlive ( String zkRegistry ) throws IOException { String parts [ ] = zkRegistry . split ( ":" ) ; if ( parts . length != 2 ) { throw new IllegalArgumentException ( "Invalid Address : " + zkRegistry ) ; } String host = parts [ 0 ] ; int port = Integer . parseInt ( parts [ 1 ] ) ; InetSocketAd...
Tries to bind to the address specified in ZooKeeper this will always fail if the primary is alive either on the same machine or on a remote machine .
32,629
static InetSocketAddress getRemoteNamenodeAddress ( Configuration conf , InstanceId instance ) throws IOException { String fs = null ; if ( instance == InstanceId . NODEZERO ) { fs = conf . get ( DFS_NAMENODE_RPC_ADDRESS1_KEY ) ; if ( fs == null ) fs = conf . get ( "fs.default.name1" ) ; } else if ( instance == Instanc...
Returns the address of the remote namenode
32,630
static String getRemoteNamenodeHttpName ( Configuration conf , InstanceId instance ) throws IOException { if ( instance == InstanceId . NODEZERO ) { return conf . get ( "dfs.http.address1" ) ; } else if ( instance == InstanceId . NODEONE ) { return conf . get ( "dfs.http.address0" ) ; } else { throw new IOException ( "...
Returns the name of the http server of the remote namenode .
32,631
private void setProxy ( String proxyStr ) { String [ ] strs = proxyStr . split ( ":" , 2 ) ; if ( strs . length != 2 ) throw new RuntimeException ( "Bad SOCKS proxy parameter: " + proxyStr ) ; String host = strs [ 0 ] ; int port = Integer . parseInt ( strs [ 1 ] ) ; this . proxy = new Proxy ( Proxy . Type . SOCKS , Ine...
Set the proxy of this socket factory as described in the string parameter
32,632
public void addResourceMetadata ( String resourceName , ResourceMetadata resourceMetadata ) { if ( resourceMetadataMap . put ( resourceName , resourceMetadata ) != null ) { throw new RuntimeException ( "Resource name " + resourceName + " already exists!" ) ; } }
Add resource metadata for this pool .
32,633
public ResourceMetadata getResourceMetadata ( String resourceName ) { if ( ! resourceMetadataMap . containsKey ( resourceName ) ) { throw new RuntimeException ( "No resource metadata for " + resourceName ) ; } return resourceMetadataMap . get ( resourceName ) ; }
Get resource metadata for a resource name
32,634
private void readClusterNodeInfo ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "clusterNodeInfo" ) ; clusterNodeInfo = new ClusterNodeInfo ( ) ; coronaSerializer . readStartObjectToken ( "clusterNodeInfo" ) ; coronaSerializer . readField ( "name" ) ; clusterNodeInfo . name = ...
Reads the clusterNodeInfo object from the JSON stream
32,635
private void readGrants ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readStartObjectToken ( "grants" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { coronaSerializer . readStartObjectToken ( "grant" ) ; coronaSerializer . readFiel...
Reads the list of grants from the JSON stream
32,636
public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeFieldName ( "clusterNodeInfo" ) ; jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeStringField ( "name" , clusterNodeInfo . name ) ; jsonGenerator . writeObjectField ( "address" ...
Used to write the state of the ClusterNode instance to disk when we are persisting the state of the NodeManager
32,637
public void initResourceTypeToMaxCpuMap ( Map < Integer , Map < ResourceType , Integer > > cpuToResourcePartitioning ) { resourceTypeToMaxCpu = getResourceTypeToCountMap ( ( int ) clusterNodeInfo . total . numCpus , cpuToResourcePartitioning ) ; }
This method is used to initialize the resource type to max CPU mapping based upon the cpuToResourcePartitioning instance given
32,638
public static Map < ResourceType , Integer > getResourceTypeToCountMap ( int numCpus , Map < Integer , Map < ResourceType , Integer > > cpuToResourcePartitioning ) { Map < ResourceType , Integer > ret = cpuToResourcePartitioning . get ( numCpus ) ; if ( ret == null ) { Map < ResourceType , Integer > oneCpuMap = cpuToRe...
Get a mapping of the resource type to amount of resources for a given number of cpus .
32,639
private void processINodesUC ( DataInputStream in , ImageVisitor v , boolean skipBlocks ) throws IOException { int numINUC = in . readInt ( ) ; v . visitEnclosingElement ( ImageElement . INODES_UNDER_CONSTRUCTION , ImageElement . NUM_INODES_UNDER_CONSTRUCTION , numINUC ) ; for ( int i = 0 ; i < numINUC ; i ++ ) { check...
Process the INodes under construction section of the fsimage .
32,640
private void processBlocks ( DataInputStream in , ImageVisitor v , int numBlocks , boolean skipBlocks ) throws IOException { v . visitEnclosingElement ( ImageElement . BLOCKS , ImageElement . NUM_BLOCKS , numBlocks ) ; if ( numBlocks == - 1 || numBlocks == - 2 ) { v . leaveEnclosingElement ( ) ; return ; } if ( skipBlo...
Process the blocks section of the fsimage .
32,641
private void processPermission ( DataInputStream in , ImageVisitor v ) throws IOException { v . visitEnclosingElement ( ImageElement . PERMISSIONS ) ; v . visit ( ImageElement . USER_NAME , Text . readStringOpt ( in ) ) ; v . visit ( ImageElement . GROUP_NAME , Text . readStringOpt ( in ) ) ; FsPermission fsp = new FsP...
Extract the INode permissions stored in the fsimage file .
32,642
private void processINodes ( DataInputStream in , ImageVisitor v , long numInodes , boolean skipBlocks ) throws IOException { v . visitEnclosingElement ( ImageElement . INODES , ImageElement . NUM_INODES , numInodes ) ; if ( LayoutVersion . supports ( Feature . FSIMAGE_NAME_OPTIMIZATION , imageVersion ) ) { processLoca...
Process the INode records stored in the fsimage .
32,643
private void processINode ( DataInputStream in , ImageVisitor v , boolean skipBlocks , String parentName ) throws IOException { checkInterruption ( ) ; v . visitEnclosingElement ( ImageElement . INODE ) ; String pathName = FSImageSerialization . readString ( in ) ; if ( parentName != null ) { pathName = "/" + pathName ...
Process an INode
32,644
Map < AsyncLogger , NewEpochResponseProto > createNewUniqueEpoch ( ) throws IOException { Preconditions . checkState ( ! loggers . isEpochEstablished ( ) , "epoch already created" ) ; Map < AsyncLogger , GetJournalStateResponseProto > lastPromises = loggers . waitForWriteQuorum ( loggers . getJournalState ( ) , getJour...
Fence any previous writers and obtain a unique epoch number for write - access to the journal nodes .
32,645
private boolean hasSomeDataInternal ( boolean image ) throws IOException { QuorumCall < AsyncLogger , Boolean > call = image ? loggers . isImageFormatted ( ) : loggers . isJournalFormatted ( ) ; try { call . waitFor ( loggers . size ( ) , 0 , 0 , hasDataTimeoutMs , "hasSomeData" ) ; } catch ( InterruptedException e ) {...
Checks if any data is available in the underlying storage . Returns true if any of the nodes has some data .
32,646
public void selectInputStreams ( Collection < EditLogInputStream > streams , long fromTxnId , boolean inProgressOk , boolean validateInProgressSegments ) throws IOException { QuorumCall < AsyncLogger , RemoteEditLogManifest > q = loggers . getEditLogManifest ( fromTxnId ) ; Map < AsyncLogger , RemoteEditLogManifest > r...
Select input streams . inProgressOk should be true only for tailing not for startup
32,647
public OutputStream getCheckpointOutputStream ( long txid ) throws IOException { return new HttpImageUploadStream ( httpAddresses , journalId , nsInfo , txid , loggers . getEpoch ( ) , imageUploadBufferSize , imageUploadMaxBufferedChunks ) ; }
Creates output stream for image at txid to the underlying quorum of journal nodes .
32,648
public boolean saveDigestAndRenameCheckpointImage ( long txid , MD5Hash digest ) { try { LOG . info ( "Saving md5: " + digest + " for txid: " + txid ) ; QuorumCall < AsyncLogger , Void > q = loggers . saveDigestAndRenameCheckpointImage ( txid , digest ) ; loggers . waitForWriteQuorum ( q , writeTxnsTimeoutMs , "saveDig...
Roll image and save md5 digest to the underlying nodes . This is a quorum roll and we ensure that it can succeed only on the nodes that consumed entirely the uploaded image .
32,649
public RemoteImageManifest getImageManifest ( long fromTxnId ) throws IOException { QuorumCall < AsyncLogger , RemoteImageManifest > q = loggers . getImageManifest ( fromTxnId ) ; Map < AsyncLogger , RemoteImageManifest > resps = loggers . waitForReadQuorumWithAllResponses ( q , getImageManifestTimeoutMs , "getImageMan...
Get manifest for the images stored in journal nodes . An image is considered valid if it appears in majority of the nodes with a valid md5 sum . The returned images are sorted according to their transaction id .
32,650
static RemoteImageManifest createImageManifest ( Collection < RemoteImageManifest > resps ) throws IOException { Map < Long , RemoteImage > images = Maps . newHashMap ( ) ; for ( RemoteImageManifest rm : resps ) { for ( RemoteImage ri : rm . getImages ( ) ) { if ( ri . getDigest ( ) == null ) { LOG . info ( "Skipping: ...
Concatenate manifests obtained from the underlying journalnodes . The final manifest will contain only the images committed to the majority of the nodes . Images with no md5 associated are ignored . Also the md5 must match between images from different journal nodes .
32,651
public ImageInputStream getImageInputStream ( long txid ) throws IOException { URLImageInputStream stream = loggers . getImageInputStream ( txid , httpConnectReadTimeoutMs ) ; if ( stream == null ) { throw new IOException ( "Cannot obtain input stream for image: " + txid ) ; } return new ImageInputStream ( txid , strea...
Get input stream to one of the nodes for given txid .
32,652
public static MD5Hash read ( DataInput in ) throws IOException { MD5Hash result = new MD5Hash ( ) ; result . readFields ( in ) ; return result ; }
Constructs an instance , reads its fields from the input and returns it .
32,653
public static MD5Hash digest ( InputStream in ) throws IOException { final byte [ ] buffer = new byte [ 4 * 1024 ] ; int fileLength = 0 ; final MessageDigest digester = DIGESTER_FACTORY . get ( ) ; for ( int n ; ( n = in . read ( buffer ) ) != - 1 ; ) { digester . update ( buffer , 0 , n ) ; fileLength += n ; } return ...
Construct a hash value for the content from the InputStream .
32,654
public static MD5Hash digest ( byte [ ] data , int start , int len ) { byte [ ] digest ; MessageDigest digester = DIGESTER_FACTORY . get ( ) ; digester . update ( data , start , len ) ; digest = digester . digest ( ) ; return new MD5Hash ( digest ) ; }
Construct a hash value for a byte array .
32,655
public void forceClean ( ) { while ( true ) { PathDeletionContext context = null ; try { context = cleanupThread . queue . poll ( 50L , TimeUnit . MILLISECONDS ) ; if ( context == null ) { return ; } if ( ! deletePath ( context ) ) { LOG . warn ( "forceClean:Unable to delete path " + context . fullPath ) ; } else { LOG...
Force - clean all paths . It should be called when the task tracker is shut down . Currently it is only called in MiniCoronaCluster to make sure the unit tests run in a clean fixture .
32,656
public void stop ( ) { running = false ; if ( server != null ) server . stop ( ) ; if ( aggregateDaemon != null ) aggregateDaemon . interrupt ( ) ; }
Stop all Collector threads and wait for all to finish .
32,657
public TaskTrackerUtilization getTaskTrackerUtilization ( String hostName ) throws IOException { if ( taskTrackerReports . get ( hostName ) == null ) { return null ; } return taskTrackerReports . get ( hostName ) . getTaskTrackerUtilization ( ) ; }
Implement CollectorProtocol methods
32,658
public static void main ( String argv [ ] ) throws Exception { StringUtils . startupShutdownMessage ( UtilizationCollector . class , argv , LOG ) ; try { Configuration conf = new Configuration ( ) ; UtilizationCollector collector = new UtilizationCollector ( conf ) ; if ( collector != null ) { collector . join ( ) ; } ...
main program to run on the Collector server
32,659
private static void checkPaths ( Configuration conf , List < Path > paths ) throws IOException { for ( Path p : paths ) { FileSystem fs = p . getFileSystem ( conf ) ; if ( ! fs . exists ( p ) ) { throw new FileNotFoundException ( "Source " + p + " does not exist." ) ; } } }
check the src paths
32,660
private Path relPathToRoot ( Path fullPath , Path root ) { Path justRoot = new Path ( Path . SEPARATOR ) ; if ( fullPath . depth ( ) == root . depth ( ) ) { return justRoot ; } else if ( fullPath . depth ( ) > root . depth ( ) ) { Path retPath = new Path ( fullPath . getName ( ) ) ; Path parent = fullPath . getParent (...
truncate the prefix root from the full path
32,661
private void cleanJobDirectory ( ) { try { FileSystem jobfs = jobDirectory . getFileSystem ( conf ) ; jobfs . delete ( jobDirectory , true ) ; } catch ( IOException ioe ) { LOG . warn ( "Unable to clean tmp directory " + jobDirectory , ioe ) ; } }
delete the tmp job directory
32,662
private void archive ( Path parentPath , List < Path > srcPaths , Path outputPath , boolean append ) throws IOException { parentPath = parentPath . makeQualified ( parentPath . getFileSystem ( conf ) ) ; checkPaths ( conf , srcPaths ) ; Path destinationDir = outputPath . getParent ( ) ; FileOutputFormat . setOutputPath...
archive the given source paths into the dest
32,663
private static void writeLineToMasterIndex ( FSDataOutputStream stream , long startHash , long endHash , long indexStartPos , long indexEndPos ) throws IOException { String toWrite = startHash + " " + endHash + " " + indexStartPos + " " + indexEndPos + "\n" ; stream . write ( toWrite . getBytes ( ) ) ; }
Writes data corresponding to part of _index to master index
32,664
private FSDataOutputStream createNewPartStream ( Path dst , int partId ) throws IOException { String partName = PART_PREFIX + partId ; Path output = new Path ( dst , partName ) ; FileSystem destFs = output . getFileSystem ( conf ) ; FSDataOutputStream partStream = destFs . create ( output , false , conf . getInt ( "io....
Creates new stream to write actual file data
32,665
public static String ask ( String prompt , String firstChoice , String ... choices ) throws IOException { while ( true ) { LOG . info ( prompt ) ; StringBuilder responseBuilder = new StringBuilder ( ) ; while ( true ) { int c = System . in . read ( ) ; if ( c == - 1 || c == '\r' || c == '\n' ) { break ; } responseBuild...
Display a prompt to the user and get his or her choice .
32,666
public void launchTask ( TaskInProgress tip ) throws IOException { LOG . info ( "Launching simulated task " + tip . getTask ( ) . getTaskID ( ) + " for job " + tip . getTask ( ) . getJobID ( ) ) ; TaskUmbilicalProtocol umbilicalProtocol = taskTracker . getUmbilical ( tip ) ; if ( tip . getTask ( ) . isMapTask ( ) || ti...
The primary public method that should be called to run a task . Handles both map and reduce tasks and marks them as completed after the configured time interval
32,667
protected void addTipToFinish ( TaskInProgress tip , TaskUmbilicalProtocol umbilicalProtocol ) { long currentTime = System . currentTimeMillis ( ) ; long finishTime = currentTime + Math . abs ( rand . nextLong ( ) ) % timeToFinishTask ; LOG . info ( "Adding TIP " + tip . getTask ( ) . getTaskID ( ) + " to finishing que...
Add the specified TaskInProgress to the priority queue of tasks to finish .
32,668
public void run ( ) { while ( true ) { TipToFinish ttf = null ; try { LOG . debug ( "Waiting for a TIP" ) ; ttf = tipQueue . take ( ) ; } catch ( InterruptedException e ) { LOG . info ( "Got interrupted exception while waiting to take()" ) ; continue ; } LOG . debug ( " Got a TIP " + ttf . getTip ( ) . getTask ( ) . ge...
Continuously looks through the queue of TIPs to mark as finished , finishing and sleeping as necessary . Can be interrupted while it is sleeping if it needs to re - evaluate how long to sleep .
32,669
public void cancel ( TaskInProgress tip ) { LOG . info ( "Canceling task " + tip . getTask ( ) . getTaskID ( ) + " of job " + tip . getTask ( ) . getJobID ( ) ) ; if ( ! tip . getTask ( ) . isMapTask ( ) && ! tip . getTask ( ) . isTaskCleanupTask ( ) ) { if ( ! mapperWaitThreadMap . containsKey ( tip ) ) { throw new Ru...
Called in case the task needs to be killed . Canceling will kill any map wait threads and also remove it from the queue of tasks that should be marked as finished .
32,670
public EventRecord parseLine ( String line ) throws IOException { EventRecord retval = null ; if ( line != null ) { String patternStr = "(" + dateformat + ")" ; patternStr += "\\s+" ; patternStr += "(" + timeformat + ")" ; patternStr += ".{4}\\s(\\w*)\\s" ; patternStr += "\\s*([\\w+\\.?]+)" ; patternStr += ":\\s+(.+)" ...
Parses one line of the log . If the line contains a valid log entry then an appropriate EventRecord is returned after all relevant fields have been parsed .
32,671
protected Calendar parseDate ( String strDate , String strTime ) { Calendar retval = Calendar . getInstance ( ) ; String [ ] fields = strDate . split ( "-" ) ; retval . set ( Calendar . YEAR , Integer . parseInt ( fields [ 0 ] ) ) ; retval . set ( Calendar . MONTH , Integer . parseInt ( fields [ 1 ] ) ) ; retval . set ...
Parse a date found in the Hadoop log .
32,672
private void findHostname ( ) { String startupInfo = Environment . runCommand ( "grep --max-count=1 STARTUP_MSG:\\s*host " + file . getName ( ) ) . toString ( ) ; Pattern pattern = Pattern . compile ( "\\s+(\\w+/.+)\\s+" ) ; Matcher matcher = pattern . matcher ( startupInfo ) ; if ( matcher . find ( 0 ) ) { hostname = ...
Attempt to determine the hostname of the node that created the log file . This information can be found in the STARTUP_MSG lines of the Hadoop log which are emitted when the node starts .
32,673
public boolean addToCorruptReplicasMap ( Block blk , DatanodeDescriptor dn ) { Collection < DatanodeDescriptor > nodes = getNodes ( blk ) ; if ( nodes == null ) { nodes = new TreeSet < DatanodeDescriptor > ( ) ; corruptReplicasMap . put ( blk , nodes ) ; } boolean added = false ; if ( ! nodes . contains ( dn ) ) { adde...
Mark the block belonging to datanode as corrupt .
32,674
boolean removeFromCorruptReplicasMap ( Block blk , DatanodeDescriptor datanode ) { Collection < DatanodeDescriptor > datanodes = corruptReplicasMap . get ( blk ) ; if ( datanodes == null ) return false ; if ( datanodes . remove ( datanode ) ) { if ( datanodes . isEmpty ( ) ) { corruptReplicasMap . remove ( blk ) ; } re...
Remove the block at the given datanode from CorruptBlockMap
32,675
Collection < DatanodeDescriptor > getNodes ( Block blk ) { if ( corruptReplicasMap . size ( ) == 0 ) return null ; return corruptReplicasMap . get ( blk ) ; }
Get Nodes which have corrupt replicas of Block
32,676
boolean isReplicaCorrupt ( Block blk , DatanodeDescriptor node ) { Collection < DatanodeDescriptor > nodes = getNodes ( blk ) ; return ( ( nodes != null ) && ( nodes . contains ( node ) ) ) ; }
Check if replica belonging to Datanode is corrupt
32,677
static int getFingerprint ( Method method ) { int hashcode = method . getName ( ) . hashCode ( ) ; hashcode = hashcode + 31 * method . getReturnType ( ) . getName ( ) . hashCode ( ) ; for ( Class < ? > type : method . getParameterTypes ( ) ) { hashcode = 31 * hashcode ^ type . getName ( ) . hashCode ( ) ; } return hash...
Calculate a method's hash code considering its method name , return type and its parameter types
32,678
private static int [ ] getFingerprints ( Method [ ] methods ) { if ( methods == null ) { return null ; } int [ ] hashCodes = new int [ methods . length ] ; for ( int i = 0 ; i < methods . length ; i ++ ) { hashCodes [ i ] = getFingerprint ( methods [ i ] ) ; } return hashCodes ; }
Convert an array of Method into an array of hash codes
32,679
private static ProtocolSigFingerprint getSigFingerprint ( Class < ? extends VersionedProtocol > protocol , long serverVersion ) { String protocolName = protocol . getName ( ) ; synchronized ( PROTOCOL_FINGERPRINT_CACHE ) { ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE . get ( protocolName ) ; if ( sig == null...
Return a protocol's signature and fingerprint from cache
32,680
public static void incrComputeSpecs ( ComputeSpecs target , ComputeSpecs incr ) { target . numCpus += incr . numCpus ; target . memoryMB += incr . memoryMB ; target . diskGB += incr . diskGB ; }
Increase the compute specs
32,681
public static void decrComputeSpecs ( ComputeSpecs target , ComputeSpecs decr ) { target . numCpus -= decr . numCpus ; target . memoryMB -= decr . memoryMB ; target . diskGB -= decr . diskGB ; }
Decrease the compute specs by decr
32,682
public static void waitThreadTermination ( Thread thread ) { while ( thread != null && thread . isAlive ( ) ) { thread . interrupt ( ) ; try { thread . join ( ) ; } catch ( InterruptedException e ) { } } }
A reliable way to wait for the thread termination
32,683
public static InetAddress appInfoToAddress ( String info ) { Matcher m = INET_ADDRESS_PATTERN . matcher ( info ) ; if ( m . find ( ) ) { int port = Integer . parseInt ( m . group ( 2 ) ) ; return new InetAddress ( m . group ( 1 ) , port ) ; } return null ; }
Convert the appinfo string to the address the application is available on
32,684
public static void makeProcessExitOnUncaughtException ( final Log log ) { Thread . setDefaultUncaughtExceptionHandler ( new Thread . UncaughtExceptionHandler ( ) { public void uncaughtException ( Thread t , Throwable e ) { log . error ( "UNCAUGHT: Thread " + t . getName ( ) + " got an uncaught exception" , e ) ; System...
Sets an uncaught exception handler . This will make the process exit with exit code 1 if a thread exits due to an uncaught exception .
32,685
/**
 * Restore the current thread's interrupt status, log the given
 * InterruptedException, and re-throw it wrapped in an IOException.
 *
 * @param msg message to log and to use for the IOException
 * @param e   the InterruptedException being converted
 * @throws IOException always, wrapping {@code e}
 */
public static void interruptedException(String msg, InterruptedException e)
    throws IOException {
  // Re-set the interrupt flag so callers up the stack still see it.
  Thread.currentThread().interrupt();
  LOG.error(msg, e);
  throw new IOException(msg, e);
}
Interrupt the current thread, then log and re-throw an InterruptedException as an IOException.
32,686
/**
 * Obtain the block size of a file, taken as the maximum block size among
 * its located blocks.
 *
 * @param lbs the located blocks of the file
 * @return the largest block size seen, or -1 when there are no blocks
 * @throws IOException declared for callers; not thrown directly here
 */
private static long getBlockSize(LocatedBlocks lbs) throws IOException {
  long maxSize = -1;
  for (LocatedBlock lb : lbs.getLocatedBlocks()) {
    long size = lb.getBlockSize();
    if (size > maxSize) {
      maxSize = size;
    }
  }
  return maxSize;
}
Obtain the block size of a file, taken as the maximum block size among its located blocks.
32,687
public boolean undelete ( Path f , String userName ) throws IOException { List < Codec > codecList = Codec . getCodecs ( ) ; Path [ ] parityPathList = new Path [ codecList . size ( ) ] ; for ( int i = 0 ; i < parityPathList . length ; i ++ ) { parityPathList [ i ] = new Path ( codecList . get ( i ) . parityDirectory , ...
Undelete the parity files together with the source file.
32,688
private boolean searchHarDir ( FileStatus stat ) throws IOException { if ( ! stat . isDir ( ) ) { return false ; } String pattern = stat . getPath ( ) . toString ( ) + "/*" + RaidNode . HAR_SUFFIX + "*" ; FileStatus [ ] stats = globStatus ( new Path ( pattern ) ) ; if ( stats != null && stats . length > 0 ) { return tr...
Search for HAR-ed parity files.
32,689
/**
 * Return the i-th hard-linked file, or null when the index is out of range.
 *
 * @param i zero-based index into the linked-file list
 * @return the INodeHardLinkFile at position {@code i}, or null when
 *         {@code i} is negative or past the end of the list
 */
public INodeHardLinkFile getHardLinkedFile(int i) {
  // Bug fix: the original only checked the upper bound, so a negative
  // index fell through to linkedFiles.get(i) and threw
  // IndexOutOfBoundsException instead of returning null like the
  // out-of-range case does.
  if (i >= 0 && i < this.linkedFiles.size()) {
    return this.linkedFiles.get(i);
  }
  return null;
}
Return the i-th hard-linked file.
32,690
public void removeLinkedFile ( INodeHardLinkFile file ) { for ( int i = 0 ; i < linkedFiles . size ( ) ; i ++ ) { if ( linkedFiles . get ( i ) == file ) { linkedFiles . remove ( i ) ; break ; } } INodeFile newOwner = null ; if ( linkedFiles . size ( ) == 1 ) { INodeHardLinkFile lastReferencedFile = linkedFiles . get ( ...
Remove an INodeHardLinkFile from the linkedFiles. This function is not thread-safe; the caller is expected to hold the write lock.
32,691
/**
 * Set the PermissionStatus on every file in the hard-link group.
 * NOTE(review): the {@code false} flag mirrors the sibling setters
 * (setUser/setGroup/setPermission) — presumably it suppresses
 * re-propagation through the link group; confirm against the callee.
 *
 * @param ps the permission status to apply to all linked files
 */
protected void setPermissionStatus(PermissionStatus ps) {
  for (int i = 0; i < linkedFiles.size(); i++) {
    linkedFiles.get(i).setPermissionStatus(ps, false);
  }
}
Set the PermissionStatus for all the linked files.
32,692
/**
 * Set the user name on every file in the hard-link group.
 *
 * @param user the owner user name to apply to all linked files
 */
protected void setUser(String user) {
  for (int i = 0; i < linkedFiles.size(); i++) {
    linkedFiles.get(i).setUser(user, false);
  }
}
Set the user name for all the linked files
32,693
/**
 * Set the group name on every file in the hard-link group.
 *
 * @param group the owner group name to apply to all linked files
 */
protected void setGroup(String group) {
  for (int i = 0; i < linkedFiles.size(); i++) {
    linkedFiles.get(i).setGroup(group, false);
  }
}
Set the group name for all the linked files
32,694
/**
 * Set the permission on every file in the hard-link group.
 *
 * @param permission the FsPermission to apply to all linked files
 */
protected void setPermission(FsPermission permission) {
  for (int i = 0; i < linkedFiles.size(); i++) {
    linkedFiles.get(i).setPermission(permission, false);
  }
}
Set the permission for all the linked files
32,695
public static void writeRAMFiles ( DataOutput out , RAMDirectory dir , String [ ] names ) throws IOException { out . writeInt ( names . length ) ; for ( int i = 0 ; i < names . length ; i ++ ) { Text . writeString ( out , names [ i ] ) ; long length = dir . fileLength ( names [ i ] ) ; out . writeLong ( length ) ; if (...
Write a number of files from a ram directory to a data output .
32,696
public static void readRAMFiles ( DataInput in , RAMDirectory dir ) throws IOException { int numFiles = in . readInt ( ) ; for ( int i = 0 ; i < numFiles ; i ++ ) { String name = Text . readString ( in ) ; long length = in . readLong ( ) ; if ( length > 0 ) { IndexOutput output = null ; try { output = dir . createOutpu...
Read a number of files from a data input to a ram directory .
32,697
/**
 * Asynchronously record a locality observation: queue it on
 * {@code localityRecords} and notify the consumer waiting on that monitor.
 *
 * @param tip        the task the observation is for
 * @param host       the host the task ran on
 * @param inputBytes the number of input bytes read
 */
public void record(TaskInProgress tip, String host, long inputBytes) {
  synchronized (localityRecords) {
    Record entry = new Record(tip, host, inputBytes);
    localityRecords.add(entry);
    // Wake up the thread draining this queue.
    localityRecords.notify();
  }
}
Asynchronous update of locality .
32,698
/**
 * Perform the statistics computation for a queued locality record by
 * unpacking its fields and delegating to the field-level overload.
 *
 * @param record the locality record to process
 */
private void computeStatistics(Record record) {
  computeStatistics(record.tip, record.host, record.inputBytes);
}
Perform the statistics computation based on a locality record.
32,699
private void computeStatistics ( TaskInProgress tip , String host , long inputBytes ) { int level = this . maxLevel ; String [ ] splitLocations = tip . getSplitLocations ( ) ; if ( splitLocations . length > 0 ) { Node tracker = topologyCache . getNode ( host ) ; for ( String local : splitLocations ) { Node datanode = t...
Perform the statistics computation.