idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,800
/**
 * Initialize the SecondaryNameNode: register with the active NameNode over RPC,
 * recover or create the local checkpoint storage directories, and start the
 * embedded HTTP info server used to serve and transfer image files.
 * Reads checkpoint period/txn-count and HTTP bind settings from the given conf,
 * then writes the actual bound address back into the conf.
 * NOTE(review): the last two messages are logged at WARN although they look
 * informational — presumably intentional for visibility; confirm.
 *
 * @param conf configuration supplying RPC, storage and HTTP settings
 * @throws IOException if RPC registration, storage recovery or server start fails
 */
private void initialize ( Configuration conf ) throws IOException { JvmMetrics . init ( "SecondaryNameNode" , conf . get ( "session.id" ) ) ; shouldRun = true ; nameNodeAddr = NameNode . getClientProtocolAddress ( conf ) ; this . conf = conf ; this . namenode = ( NamenodeProtocol ) RPC . waitForProxy ( NamenodeProtocol . class , NamenodeProtocol . versionID , nameNodeAddr , conf ) ; this . namenode . register ( ) ; fsName = getInfoServer ( ) ; checkpointDirs = getFileStorageDirs ( NNStorageConfiguration . getCheckpointDirs ( conf , "/tmp/hadoop/dfs/namesecondary" ) ) ; checkpointEditsDirs = getFileStorageDirs ( NNStorageConfiguration . getCheckpointEditsDirs ( conf , "/tmp/hadoop/dfs/namesecondary" ) ) ; checkpointImage = new CheckpointStorage ( conf ) ; checkpointImage . recoverCreate ( checkpointDirs , checkpointEditsDirs ) ; checkpointPeriod = conf . getLong ( "fs.checkpoint.period" , 3600 ) ; checkpointTxnCount = NNStorageConfiguration . getCheckpointTxnCount ( conf ) ; String infoAddr = NetUtils . getServerAddress ( conf , "dfs.secondary.info.bindAddress" , "dfs.secondary.info.port" , "dfs.secondary.http.address" ) ; InetSocketAddress infoSocAddr = NetUtils . createSocketAddr ( infoAddr ) ; String infoBindIpAddress = infoSocAddr . getAddress ( ) . getHostAddress ( ) ; int tmpInfoPort = infoSocAddr . getPort ( ) ; infoServer = new HttpServer ( "secondary" , infoBindIpAddress , tmpInfoPort , tmpInfoPort == 0 , conf ) ; infoServer . setAttribute ( "name.system.image" , checkpointImage ) ; this . infoServer . setAttribute ( "name.conf" , conf ) ; infoServer . addInternalServlet ( "getimage" , "/getimage" , GetImageServlet . class ) ; infoServer . start ( ) ; infoPort = infoServer . getPort ( ) ; conf . set ( "dfs.secondary.http.address" , infoBindIpAddress + ":" + infoPort ) ; LOG . info ( "Secondary Web-server up at: " + infoBindIpAddress + ":" + infoPort ) ; LOG . 
warn ( "Checkpoint Period :" + checkpointPeriod + " secs " + "(" + checkpointPeriod / 60 + " min)" ) ; LOG . warn ( "Log Size Trigger :" + checkpointTxnCount + " transactions " ) ; }
Initialize SecondaryNameNode .
33,801
/**
 * Shut down this SecondaryNameNode instance: stop the info HTTP server and
 * close the checkpoint image storage. Returns only after shutdown is
 * complete; failures are logged but never propagated.
 */
public void shutdown() {
  shouldRun = false;
  try {
    if (infoServer != null) {
      infoServer.stop();
    }
  } catch (Exception e) {
    LOG.warn("Exception shutting down SecondaryNameNode", e);
  }
  try {
    if (checkpointImage != null) {
      checkpointImage.close();
    }
  } catch (IOException e) {
    LOG.warn(StringUtils.stringifyException(e));
  }
}
Shut down this instance of the SecondaryNameNode. Returns only after shutdown is complete.
33,802
/**
 * Returns the address of the Jetty info server that the Namenode is
 * listening on, derived from the configuration.
 *
 * @throws IOException if the default filesystem is not HDFS
 */
private String getInfoServer() throws IOException {
  final URI defaultUri = FileSystem.getDefaultUri(conf);
  if (!"hdfs".equals(defaultUri.getScheme())) {
    throw new IOException("This is not a DFS");
  }
  return NetUtils.getServerAddress(conf, "dfs.info.bindAddress",
      "dfs.info.port", "dfs.http.address");
}
Returns the address of the Jetty server that the Namenode is listening on.
33,803
/**
 * Create a new checkpoint: roll the edit log on the namenode, download the
 * image (when required) and the new edit segments, merge them locally, and
 * upload the resulting image back to the namenode before purging old storage.
 * Injection events bracket the transfer steps for fault-injection tests.
 *
 * @return true if a fresh image had to be downloaded/loaded for this checkpoint
 * @throws IOException on any RPC, transfer or merge failure
 */
boolean doCheckpoint ( ) throws IOException { LOG . info ( "Checkpoint starting" ) ; startCheckpoint ( ) ; checkpointImage . ensureCurrentDirExists ( ) ; NNStorage dstStorage = checkpointImage . storage ; CheckpointSignature sig = namenode . rollEditLog ( ) ; if ( checkpointImage . getNamespaceID ( ) != 0 ) { sig . validateStorageInfo ( checkpointImage . storage ) ; } else { dstStorage . setStorageInfo ( sig ) ; } InjectionHandler . processEventIO ( InjectionEvent . SECONDARYNAMENODE_CHECKPOINT0 ) ; RemoteEditLogManifest manifest = namenode . getEditLogManifest ( sig . mostRecentCheckpointTxId + 1 ) ; boolean loadImage = downloadCheckpointFiles ( fsName , checkpointImage , sig , manifest ) ; doMerge ( sig , manifest , loadImage , checkpointImage ) ; long txid = checkpointImage . getLastAppliedTxId ( ) ; TransferFsImage . uploadImageFromStorage ( fsName , InetAddress . getLocalHost ( ) . getHostAddress ( ) , infoPort , dstStorage , txid ) ; InjectionHandler . processEventIO ( InjectionEvent . SECONDARYNAMENODE_CHECKPOINT1 ) ; namenode . rollFsImage ( new CheckpointSignature ( checkpointImage ) ) ; LOG . warn ( "Checkpoint done. New Image Size: " + dstStorage . getFsImageName ( StorageLocationType . LOCAL , txid ) . length ( ) ) ; checkpointImage . purgeOldStorage ( ) ; return loadImage ; }
Create a new checkpoint
33,804
/**
 * Merge the downloaded image and edits and write the new image into the
 * current storage directory.
 *
 * @param loadImage when true a fresh FSNamesystem is built around the
 *                  downloaded checkpoint image before the merge runs
 */
private void doMerge ( CheckpointSignature sig , RemoteEditLogManifest manifest , boolean loadImage , FSImage dstImage ) throws IOException { if ( loadImage ) { namesystem = new FSNamesystem ( checkpointImage , conf ) ; checkpointImage . setFSNamesystem ( namesystem ) ; } assert namesystem . dir . fsImage == checkpointImage ; checkpointImage . doMerge ( sig , manifest , loadImage ) ; }
Merge downloaded image and edits and write the new image into current storage directory .
33,805
/**
 * Write one control file for verification. The file holds a single
 * (outputPath, checksumFile) record naming the base directory written by this
 * mapper and its checksum file, so a later read mapper can scan the directory
 * and verify files against the recorded checksums.
 *
 * @param fs           filesystem to write the control file to
 * @param outputPath   base directory written by this mapper
 * @param checksumFile path of the checksum file for that directory
 * @param name         control file name under {rtc.input}/filelists
 * @throws IOException if the control file cannot be created or written
 */
private void writeControlFile(FileSystem fs, Path outputPath, Path checksumFile, String name) throws IOException {
  SequenceFile.Writer writer = null;
  try {
    Path parentDir = new Path(rtc.input, "filelists");
    if (!fs.exists(parentDir)) {
      fs.mkdirs(parentDir);
    }
    Path controlFile = new Path(parentDir, name);
    writer = SequenceFile.createWriter(fs, fs.getConf(), controlFile,
        Text.class, Text.class, CompressionType.NONE);
    writer.append(new Text(outputPath.toString()), new Text(checksumFile.toString()));
  } finally {
    // fix: dropped the dead "write = null" store after close; the local is
    // about to go out of scope anyway.
    if (writer != null) {
      writer.close();
    }
  }
}
This is used for verification. Each mapper writes one control file; the control file only contains the base directory written by this mapper and the checksum file path, so that we can create a read mapper which scans the files under the base directory and verifies the checksum of the files against the information given in the checksum file.
33,806
/**
 * Create the writer threads that generate write traffic.
 * Roll/sync intervals and max runtime are read in seconds and converted to
 * milliseconds; the data rate is read in KB and scaled by 1024.
 *
 * @param key   used (with the task id) to build the task name
 * @param value the input directory for this task
 * @return one GenWriterThread per configured thread count
 * @throws IOException if preparation of the run-time constants fails
 */
public GenThread [ ] prepare ( JobConf conf , Text key , Text value ) throws IOException { this . rtc = new GenWriterRunTimeConstants ( ) ; super . prepare ( conf , key , value , rtc ) ; rtc . task_name = key . toString ( ) + rtc . taskID ; rtc . roll_interval = conf . getLong ( WRITER_ROLL_INTERVAL_KEY , DEFAULT_ROLL_INTERVAL_SEC ) * 1000 ; rtc . sync_interval = conf . getLong ( WRITER_SYNC_INTERVAL_KEY , DEFAULT_SYNC_INTERVAL_SEC ) * 1000 ; rtc . max_time = conf . getLong ( MAX_TIME_SEC_KEY , DEFAULT_MAX_TIME_SEC ) * 1000 ; rtc . data_rate = conf . getLong ( WRITER_DATARATE_KEY , DEFAULT_DATA_RATE ) * 1024 ; rtc . input = value . toString ( ) ; LOG . info ( "data rate: " + rtc . data_rate ) ; GenWriterThread [ ] threads = new GenWriterThread [ ( int ) rtc . nthreads ] ; for ( int i = 0 ; i < rtc . nthreads ; i ++ ) { threads [ i ] = new GenWriterThread ( conf , new Path ( new Path ( rtc . input , rtc . task_name ) , rtc . task_name + "_" + i ) , rtc . task_name , i , rtc ) ; } return threads ; }
Create a number of threads to generate write traffics
33,807
/**
 * Determine whether the ingest is still catching up. When consuming an
 * in-progress segment, we are catching up iff the unread remainder exceeds
 * catchUpLag; a finalized segment (or any failure to measure) is always
 * treated as behind.
 */
private void setCatchingUp() throws IOException {
  try {
    if (inputEditStream != null && inputEditStream.isInProgress()) {
      final long remaining = inputEditStream.length() - inputEditStream.getPosition();
      catchingUp = remaining > catchUpLag;
    } else {
      catchingUp = true;
    }
  } catch (Exception e) {
    // best-effort measurement: on any error, assume we are behind
    catchingUp = true;
  }
}
Checks if the ingest is catching up. If the ingest is consuming a finalized segment, it is assumed to be behind. Otherwise, catching up is based on the position of the input stream.
33,808
/**
 * Returns the distance in bytes between the current read position in the
 * edit log and its length, clamped to at least -1. Returns -1 when there is
 * no in-progress stream or when the measurement fails.
 */
public long getLagBytes() {
  try {
    if (inputEditStream == null || !inputEditStream.isInProgress()) {
      return -1;
    }
    final long lag = inputEditStream.length() - inputEditStream.getPosition();
    return Math.max(-1, lag);
  } catch (IOException ex) {
    LOG.error("Error getting the lag", ex);
    return -1;
  }
}
Returns the distance in bytes between the current position inside of the edits log and the length of the edits log
33,809
/**
 * Load an edit log segment and apply its transactions to the in-memory
 * namespace of the standby. The finally block always syncs the local edit
 * log (when no end txid was reached and the log is open), closes the input
 * stream and clears the ingest state, even on failure.
 *
 * @return number of edits applied; incremented by one when the log version
 *         differs from the current layout version
 * @throws IOException on read failure; pre-txid layouts are rejected with a
 *         RuntimeException
 */
private int loadFSEdits ( ) throws IOException { FSDirectory fsDir = fsNamesys . dir ; int numEdits = 0 ; long startTime = FSNamesystem . now ( ) ; LOG . info ( "Ingest: Consuming transactions: " + this . toString ( ) ) ; try { logVersion = inputEditStream . getVersion ( ) ; if ( ! LayoutVersion . supports ( Feature . TXID_BASED_LAYOUT , logVersion ) ) throw new RuntimeException ( "Log version is too old" ) ; currentPosition = inputEditStream . getPosition ( ) ; numEdits = ingestFSEdits ( ) ; } finally { LOG . info ( "Ingest: Closing ingest for segment: " + this . toString ( ) ) ; if ( endTxId == - 1 && fsDir . fsImage . getEditLog ( ) . isOpen ( ) ) { fsDir . fsImage . getEditLog ( ) . logSync ( ) ; } inputEditStream . close ( ) ; standby . clearIngestState ( ) ; } LOG . info ( "Ingest: Edits segment: " + this . toString ( ) + " edits # " + numEdits + " loaded in " + ( FSNamesystem . now ( ) - startTime ) / 1000 + " seconds." ) ; if ( logVersion != FSConstants . LAYOUT_VERSION ) numEdits ++ ; return numEdits ; }
Load an edit log and continue applying the changes to the in - memory structure . This is where we ingest transactions into the standby .
33,810
/**
 * Read a single transaction from the input edit log.
 *
 * @return the next op, or null at end-of-file
 * @throws IOException on read error. The catch-and-rethrow of IOException is
 *         deliberate, not redundant: without it an IOException would fall
 *         into the generic Exception handler below and be double-wrapped.
 */
private FSEditLogOp ingestFSEdit ( EditLogInputStream inputEditLog ) throws IOException { FSEditLogOp op = null ; try { op = inputEditLog . readOp ( ) ; InjectionHandler . processEventIO ( InjectionEvent . INGEST_READ_OP ) ; } catch ( EOFException e ) { return null ; } catch ( IOException e ) { throw e ; } catch ( Exception e ) { throw new IOException ( e ) ; } return op ; }
Read a single transaction from the input edit log
33,811
/**
 * Decide whether a transaction should be applied to the namespace. Used for
 * ingest recovery: transactions at or below the last correct txid are still
 * written to the local edit log but must not be re-loaded into the namespace.
 *
 * @return true when txid is beyond the last correct txid of the standby
 */
private boolean shouldLoad(long txid) {
  if (txid > standby.getLastCorrectTxId()) {
    return true;
  }
  LOG.info("Ingest: skip loading txId: " + txid
      + " to namesystem, but writing to edit log, last correct txid: "
      + standby.getLastCorrectTxId());
  return false;
}
Used for ingest recovery, where we erase the local edit log and transactions need to be populated to the local log, but they should not be loaded into the namespace.
33,812
/**
 * Maps an absolute HDFS path to its location in the local cache: the cache
 * directory prefixed onto the unchanged HDFS pathname.
 */
Path mapCachePath(Path hdfsPath) {
  assert hdfsPath.isAbsolute();
  return new Path(cacheDir + Path.SEPARATOR + hdfsPath);
}
Maps an HDFS path into a pathname in the local cache. In the current implementation the cache path is the same as the HDFS pathname.
33,813
/**
 * Evicts a file from the cache by deleting its local copy. Called by the
 * cache when it exceeds capacity (part of the eviction interface). A failed
 * delete is only logged at debug level, never propagated.
 * NOTE(review): the size parameter is unused here — presumably the caller
 * accounts for freed capacity itself; confirm.
 */
public void evictCache ( Path hdfsPath , Path localPath , long size ) throws IOException { boolean done = cacheFs . delete ( localPath , false ) ; if ( ! done ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Evict for path: " + hdfsPath + " local path " + localPath + " unsuccessful." ) ; } } }
Evicts a file from the cache . If the cache is exceeding capacity then the cache calls this method to indicate that it is evicting a file from the cache . This is part of the Eviction Interface .
33,814
/**
 * Create a new file. The returned stream writes through to the underlying
 * filesystem and the cache filesystem simultaneously (via CacheOutputStream).
 */
public FSDataOutputStream create(Path f, FsPermission permission, boolean overwrite, int bufferSize, short replication, long blockSize, Progressable progress) throws IOException {
  final CacheOutputStream out = new CacheOutputStream(conf, this, f, permission,
      overwrite, bufferSize, replication, blockSize, progress);
  return new FSDataOutputStream(out);
}
Create a new file. We start writing the data into the underlying filesystem as well as the cache filesystem.
33,815
/**
 * Execute the command for each source path (after glob expansion), or once
 * with no path when no args were given.
 *
 * @return 0 on success, -1 if any listing was missing or an IOException occurred
 */
public int runAll() {
  if (args == null) {
    return run();
  }
  int exitCode = 0;
  for (String src : args) {
    try {
      Path srcPath = new Path(src);
      FileSystem fs = srcPath.getFileSystem(getConf());
      FileStatus[] matches = fs.globStatus(srcPath);
      if (matches == null) {
        System.err.println("Can not find listing for " + src);
        exitCode = -1;
        continue;
      }
      for (FileStatus status : matches) {
        try {
          run(status.getPath());
        } catch (FileNotFoundException ex) {
          // only rethrow when the exact path the user named has vanished;
          // glob-expanded siblings disappearing mid-run are tolerated
          if (status.getPath().equals(srcPath.makeQualified(fs))) {
            throw ex;
          }
        }
      }
    } catch (IOException e) {
      exitCode = -1;
      handleIOException(e);
    }
  }
  return exitCode;
}
For each source path execute the command
33,816
/**
 * Copy the given stream to stdout, always closing the input afterwards.
 * Stdout itself is left open.
 */
private void printToStdout(InputStream source) throws IOException {
  try {
    IOUtils.copyBytes(source, System.out, getConf(), false);
  } finally {
    source.close();
  }
}
Print from src to stdout .
33,817
/**
 * Copy a file — or, recursively, a directory tree — from the given file
 * system to a local destination. A file is first copied to a local temp file
 * and then renamed into place, so a partially-copied file never appears at
 * dst. When copyCrc is set the matching checksum file is copied alongside
 * (the source must then be a ChecksumFileSystem).
 *
 * @throws IOException if dst exists, the copy fails, the rename fails, or
 *         crc copying is requested on a non-checksum filesystem
 */
private void copyToLocal ( final FileSystem srcFS , final Path src , final File dst , final boolean copyCrc ) throws IOException { if ( ! srcFS . getFileStatus ( src ) . isDir ( ) ) { if ( dst . exists ( ) ) { throw new IOException ( "Target " + dst + " already exists" ) ; } File tmp = FileUtil . createLocalTempFile ( dst . getAbsoluteFile ( ) , COPYTOLOCAL_PREFIX , true ) ; if ( ! FileUtil . copy ( srcFS , src , tmp , false , srcFS . getConf ( ) ) ) { throw new IOException ( "Failed to copy " + src + " to " + dst ) ; } if ( ! tmp . renameTo ( dst ) ) { throw new IOException ( "Failed to rename tmp file " + tmp + " to local destination \"" + dst + "\"." ) ; } if ( copyCrc ) { if ( ! ( srcFS instanceof ChecksumFileSystem ) ) { throw new IOException ( "Source file system does not have crc files" ) ; } ChecksumFileSystem csfs = ( ChecksumFileSystem ) srcFS ; File dstcs = FileSystem . getLocal ( srcFS . getConf ( ) ) . pathToFile ( csfs . getChecksumFile ( new Path ( dst . getCanonicalPath ( ) ) ) ) ; copyToLocal ( csfs . getRawFileSystem ( ) , csfs . getChecksumFile ( src ) , dstcs , false ) ; } } else { dst . mkdirs ( ) ; for ( FileStatus path : srcFS . listStatus ( src ) ) { copyToLocal ( srcFS , path . getPath ( ) , new File ( dst , path . getPath ( ) . getName ( ) ) , copyCrc ) ; } } }
Copy a source file from a given file system to local destination .
33,818
/**
 * Parse the tail command arguments and print the last 1KB of the file to
 * stdout. With -f, keep polling the file every 5 seconds and print newly
 * appended data until interrupted.
 *
 * @param cmd full argument array
 * @param pos index of the first tail-specific argument
 * @throws IOException if the source is a directory or cannot be read
 */
private void tail(String[] cmd, int pos) throws IOException {
  CommandFormat c = new CommandFormat("tail", 1, 1, "f");
  String src = null;
  try {
    List<String> parameters = c.parse(cmd, pos);
    src = parameters.get(0);
  } catch (IllegalArgumentException iae) {
    System.err.println("Usage: java FreightStreamer " + TAIL_USAGE);
    throw iae;
  }
  // fix: "c.getOpt("f") ? true : false" was a redundant ternary
  boolean foption = c.getOpt("f");
  Path path = new Path(src);
  FileSystem srcFs = path.getFileSystem(getConf());
  if (srcFs.isDirectory(path)) {
    throw new IOException("Source must be a file.");
  }
  long fileSize = srcFs.getFileStatus(path).getLen();
  long offset = (fileSize > 1024) ? fileSize - 1024 : 0;
  while (true) {
    FSDataInputStream in = srcFs.open(path);
    in.seek(offset);
    IOUtils.copyBytes(in, System.out, 1024, false);
    offset = in.getPos();
    in.close();
    if (!foption) {
      break;
    }
    fileSize = srcFs.getFileStatus(path).getLen();
    // if the file was truncated, restart from its new end
    offset = (fileSize > offset) ? offset : fileSize;
    try {
      Thread.sleep(5000);
    } catch (InterruptedException e) {
      // fix: restore the interrupt flag before giving up the follow loop
      Thread.currentThread().interrupt();
      break;
    }
  }
}
Parse the incoming command string and tail the given file, optionally following it.
33,819
/**
 * Returns true if the given feature is supported in the given layout version.
 * Unknown layout versions support nothing.
 */
public static boolean supports(final Feature f, final int lv) {
  final EnumSet<Feature> features = map.get(lv);
  if (features == null) {
    return false;
  }
  return features.contains(f);
}
Returns true if a given feature is supported in the given layout version
33,820
/**
 * Run the tool for the given arguments: exactly one argument naming a known
 * command dispatches to its handler; anything else prints usage.
 *
 * @return the handler's exit code, or -1 on bad arguments
 */
private int doWork(String[] args) {
  CommandHandler handler =
      (args.length == 1) ? Command.getHandler(args[0]) : null;
  if (handler == null) {
    printUsage();
    return -1;
  }
  return handler.doWork(this);
}
Main method that runs the tool for given arguments .
33,821
/**
 * Return a vector of TaskInProgress objects — maps or reduces per
 * shouldBeMap — whose completion state matches shouldBeComplete.
 */
public Vector<TaskInProgress> reportTasksInProgress(boolean shouldBeMap, boolean shouldBeComplete) {
  final TaskInProgress[] candidates = shouldBeMap ? maps : reduces;
  Vector<TaskInProgress> results = new Vector<TaskInProgress>();
  for (TaskInProgress tip : candidates) {
    if (tip.isComplete() == shouldBeComplete) {
      results.add(tip);
    }
  }
  return results;
}
Return a vector of TaskInProgress objects (maps or reduces) whose completion state matches the requested one.
33,822
/**
 * Return a vector of cleanup TaskInProgress objects whose completion state
 * matches shouldBeComplete.
 */
public Vector<TaskInProgress> reportCleanupTIPs(boolean shouldBeComplete) {
  Vector<TaskInProgress> results = new Vector<TaskInProgress>();
  for (TaskInProgress tip : cleanup) {
    if (tip.isComplete() == shouldBeComplete) {
      results.add(tip);
    }
  }
  return results;
}
Return a vector of cleanup TaskInProgress objects
33,823
/**
 * Return a vector of setup TaskInProgress objects whose completion state
 * matches shouldBeComplete.
 */
public Vector<TaskInProgress> reportSetupTIPs(boolean shouldBeComplete) {
  Vector<TaskInProgress> results = new Vector<TaskInProgress>();
  for (TaskInProgress tip : setup) {
    if (tip.isComplete() == shouldBeComplete) {
      results.add(tip);
    }
  }
  return results;
}
Return a vector of setup TaskInProgress objects
33,824
/**
 * Return the TaskInProgress that matches the tipid, or null if none does.
 * Index 0 of the cleanup/setup arrays is the map-side TIP and index 1 the
 * reduce-side TIP, which is why the map branch checks [0] and the reduce
 * branch checks [1] before scanning the maps/reduces arrays.
 */
public TaskInProgress getTaskInProgress ( TaskID tipid ) { if ( tipid . isMap ( ) ) { if ( cleanup . length > 0 && tipid . equals ( cleanup [ 0 ] . getTIPId ( ) ) ) { return cleanup [ 0 ] ; } if ( setup . length > 0 && tipid . equals ( setup [ 0 ] . getTIPId ( ) ) ) { return setup [ 0 ] ; } for ( int i = 0 ; i < maps . length ; i ++ ) { if ( tipid . equals ( maps [ i ] . getTIPId ( ) ) ) { return maps [ i ] ; } } } else { if ( cleanup . length > 0 && tipid . equals ( cleanup [ 1 ] . getTIPId ( ) ) ) { return cleanup [ 1 ] ; } if ( setup . length > 0 && tipid . equals ( setup [ 1 ] . getTIPId ( ) ) ) { return setup [ 1 ] ; } for ( int i = 0 ; i < reduces . length ; i ++ ) { if ( tipid . equals ( reduces [ i ] . getTIPId ( ) ) ) { return reduces [ i ] ; } } } return null ; }
Return the TaskInProgress that matches the tipid .
33,825
/**
 * Close the connection and shut down the handler thread. The order matters:
 * close the data stream first, then the uplink connection, then interrupt
 * and join the uplink thread so this method returns only after the handler
 * has fully exited.
 *
 * @throws InterruptedException if interrupted while joining the uplink thread
 */
public void close ( ) throws IOException , InterruptedException { LOG . debug ( "closing connection" ) ; stream . close ( ) ; uplink . closeConnection ( ) ; uplink . interrupt ( ) ; uplink . join ( ) ; }
Close the connection and shutdown the handler thread .
33,826
/**
 * Write the given object to the stream as a vint length followed by the raw
 * bytes. Text and BytesWritable expose their backing arrays and are written
 * directly; any other Writable is first serialized into the reusable buffer.
 */
private void writeObject(Writable obj) throws IOException {
  if (obj instanceof Text) {
    final Text text = (Text) obj;
    final int len = text.getLength();
    WritableUtils.writeVInt(stream, len);
    stream.write(text.getBytes(), 0, len);
  } else if (obj instanceof BytesWritable) {
    final BytesWritable bytes = (BytesWritable) obj;
    final int len = bytes.getLength();
    WritableUtils.writeVInt(stream, len);
    stream.write(bytes.getBytes(), 0, len);
  } else {
    buffer.reset();
    obj.write(buffer);
    final int len = buffer.getLength();
    WritableUtils.writeVInt(stream, len);
    stream.write(buffer.getData(), 0, len);
  }
}
Write the given object to the stream . If it is a Text or BytesWritable write it directly . Otherwise write it to a buffer and then write the length and data to the stream .
33,827
/**
 * Stop all server threads and wait for all of them to finish. Any failure
 * during shutdown is logged and swallowed.
 */
public void stop() {
  if (server == null) {
    return;
  }
  try {
    server.stop();
    server.join();
  } catch (Exception e) {
    LOG.warn("Got exception shutting down proxy", e);
  }
}
Stop all server threads and wait for all to finish .
33,828
/**
 * Returns the parent of this path, or null when already at the root.
 * A Windows drive prefix ("c:/") shifts the effective root by three
 * characters; a path with no slash at all has parent CUR_DIR (".").
 */
public Path getParent() {
  final String path = uri.getPath();
  // offset of the effective root: 3 past a windows drive spec, else 0
  final int rootOffset = hasWindowsDrive(path, true) ? 3 : 0;
  final int lastSlash = path.lastIndexOf('/');
  boolean atRoot = path.length() == rootOffset
      || (lastSlash == rootOffset && path.length() == rootOffset + 1);
  if (atRoot) {
    return null;
  }
  final String parent;
  if (lastSlash == -1) {
    parent = CUR_DIR;
  } else {
    parent = path.substring(0, lastSlash == rootOffset ? rootOffset + 1 : lastSlash);
  }
  return new Path(uri.getScheme(), uri.getAuthority(), parent);
}
Returns the parent of a path or null if at root .
33,829
/**
 * Returns a fully-qualified path object: relative paths are resolved against
 * the filesystem's working directory, and a missing scheme/authority is
 * filled in from the filesystem's URI (empty string when the filesystem has
 * no authority either).
 */
public Path makeQualified(FileSystem fs) {
  Path resolved = this;
  if (!isAbsolute()) {
    FileSystem.LogForCollect.info("make Qualify non absolute path: " + this.toString() + " working directory: " + fs.getWorkingDirectory());
    resolved = new Path(fs.getWorkingDirectory(), this);
  }
  final URI pathUri = resolved.toUri();
  final URI fsUri = fs.getUri();
  String scheme = pathUri.getScheme();
  String authority = pathUri.getAuthority();
  // already qualified: has a scheme, and an authority wherever one is needed
  if (scheme != null && (authority != null || fsUri.getAuthority() == null)) {
    return resolved;
  }
  if (scheme == null) {
    scheme = fsUri.getScheme();
  }
  if (authority == null) {
    authority = fsUri.getAuthority();
    if (authority == null) {
      authority = "";
    }
  }
  return new Path(scheme + ":" + "//" + authority + pathUri.getPath());
}
Returns a qualified path object .
33,830
/**
 * Configure this instance from the job: record the current input file name
 * and the maximum number of unique values to aggregate (unbounded default).
 */
public void configure(JobConf job) {
  inputFile = job.get("map.input.file");
  maxNumItems = job.getLong("aggregate.max.num.unique.values", Long.MAX_VALUE);
}
Configure the mapper: record the input file name and the maximum number of unique values to aggregate.
33,831
/**
 * Look up the HardLinkFileInfo registered for the given hard-link id,
 * creating and registering a fresh one with the loading context when none
 * exists yet. Also bumps the directory's last hard-link id if this id is
 * larger.
 *
 * @return the (possibly newly registered) file info for hardLinkID
 */
public static HardLinkFileInfo loadHardLinkFileInfo(long hardLinkID, FSImageLoadingContext context) {
  context.getFSDirectory().resetLastHardLinkIDIfLarge(hardLinkID);
  HardLinkFileInfo info = context.getHardLinkFileInfo(hardLinkID);
  if (info != null) {
    return info;
  }
  info = new HardLinkFileInfo(hardLinkID);
  context.associateHardLinkIDWithFileInfo(hardLinkID, info);
  return info;
}
Create a HardLinkFileInfo if necessary and register it in the hardLinkINodeIDToFileInfoMap; return the hardLinkFileInfo registered in that map.
33,832
/**
 * Return the next plain file in a depth-first traversal of the remaining
 * paths, or null when the traversal is done. Directories are pushed onto the
 * stack and descended into; entries that vanish between listing and descent
 * (FileNotFoundException) are silently skipped.
 */
public FileStatus getNextFile ( ) throws IOException { while ( ! doneTraversal ( ) ) { while ( ! stack . isEmpty ( ) ) { Node node = stack . peek ( ) ; if ( node . hasNext ( ) ) { FileStatus element = node . next ( ) ; if ( ! element . isDir ( ) ) { return element ; } try { pushNewNode ( element ) ; } catch ( FileNotFoundException e ) { } continue ; } else { stack . pop ( ) ; continue ; } } while ( ! paths . isEmpty ( ) ) { FileStatus next = paths . remove ( 0 ) ; pathIdx ++ ; if ( ! next . isDir ( ) ) { return next ; } try { pushNewNode ( next ) ; } catch ( FileNotFoundException e ) { continue ; } break ; } } return null ; }
Return the next file .
33,833
/**
 * Map a key to its partition via the precomputed split points. The value and
 * numPartitions arguments are intentionally ignored: the trie of cut points
 * fully determines the partition for a key.
 */
@SuppressWarnings("unchecked")
public int getPartition(K key, V value, int numPartitions) {
  return partitions.findPartition(key);
}
By construction, the key type is known to match the type of the precomputed partition keys.
33,834
/**
 * Read the split-point keys from the given partition SequenceFile. Key-type
 * compatibility is enforced by the keyClass passed in; values are
 * NullWritable placeholders.
 *
 * @return the keys in file order, as an array of keyClass
 * @throws IOException on read failure
 */
@SuppressWarnings("unchecked")
private K[] readPartitions(FileSystem fs, Path p, Class<K> keyClass, JobConf job) throws IOException {
  SequenceFile.Reader reader = new SequenceFile.Reader(fs, p, job);
  ArrayList<K> parts = new ArrayList<K>();
  try {
    K key = (K) ReflectionUtils.newInstance(keyClass, job);
    NullWritable value = NullWritable.get();
    while (reader.next(key, value)) {
      parts.add(key);
      // fresh instance per record: the reader reuses the object it is given
      key = (K) ReflectionUtils.newInstance(keyClass, job);
    }
  } finally {
    // fix: previously the reader leaked when next() threw mid-read
    reader.close();
  }
  return parts.toArray((K[]) Array.newInstance(keyClass, parts.size()));
}
Matching key types are enforced by the key class passed in.
33,835
/**
 * Given a sorted set of cut points, recursively build a trie that finds the
 * correct partition quickly. Recursion stops at maxDepth or when the split
 * range is empty, producing a leaf that scans the remaining range. For each
 * byte value the child covers the splits strictly below the next prefix,
 * advanced via currentBound.
 * NOTE(review): the final child is built with trial[depth] = 127 while the
 * loop covers byte values 0..254 — presumably this should be the highest
 * byte value; confirm against the upstream TotalOrderPartitioner.
 */
private TrieNode buildTrie ( BinaryComparable [ ] splits , int lower , int upper , byte [ ] prefix , int maxDepth ) { final int depth = prefix . length ; if ( depth >= maxDepth || lower == upper ) { return new LeafTrieNode ( depth , splits , lower , upper ) ; } InnerTrieNode result = new InnerTrieNode ( depth ) ; byte [ ] trial = Arrays . copyOf ( prefix , prefix . length + 1 ) ; int currentBound = lower ; for ( int ch = 0 ; ch < 255 ; ++ ch ) { trial [ depth ] = ( byte ) ( ch + 1 ) ; lower = currentBound ; while ( currentBound < upper ) { if ( splits [ currentBound ] . compareTo ( trial , 0 , trial . length ) >= 0 ) { break ; } currentBound += 1 ; } trial [ depth ] = ( byte ) ch ; result . child [ 0xFF & ch ] = buildTrie ( splits , lower , currentBound , trial , maxDepth ) ; } trial [ depth ] = 127 ; result . child [ 255 ] = buildTrie ( splits , currentBound , upper , trial , maxDepth ) ; return result ; }
Given a sorted set of cut points build a trie that will find the correct partition quickly .
33,836
/**
 * Create a new sub-folder inside the first selected DFS folder, prompting
 * the user for its name. Does nothing when no folder is selected or the
 * dialog is cancelled.
 */
private void mkdir(IStructuredSelection selection) {
  List<DFSFolder> folders = filterSelection(DFSFolder.class, selection);
  if (folders.isEmpty()) {
    return;
  }
  DFSFolder parent = folders.get(0);
  InputDialog dialog = new InputDialog(Display.getCurrent().getActiveShell(),
      "Create subfolder", "Enter the name of the subfolder", "", null);
  if (dialog.open() == InputDialog.OK) {
    parent.mkdir(dialog.getValue());
  }
}
Create a new sub - folder into an existing directory
33,837
/**
 * Open every selected DFS file in the default text editor of the active
 * workbench page.
 */
private void open(IStructuredSelection selection) throws IOException, PartInitException, InvocationTargetException, InterruptedException {
  for (DFSFile file : filterSelection(DFSFile.class, selection)) {
    IStorageEditorInput input = new DFSFileEditorInput(file);
    targetPart.getSite().getWorkbenchWindow().getActivePage()
        .openEditor(input, "org.eclipse.ui.DefaultTextEditor");
  }
}
Open the selected DfsPath in the editor window
33,838
/**
 * Get the average wait in msecs per hard-admission job entrance, or -1 when
 * no hard-admission waits have been recorded.
 */
synchronized float getAverageWaitMsecsPerHardAdmissionJob() {
  if (hardAdmissionMillisQueue.isEmpty()) {
    return -1f;
  }
  long totalWait = 0;
  for (Long waitMillis : hardAdmissionMillisQueue) {
    totalWait += waitMillis;
  }
  return ((float) totalWait) / hardAdmissionMillisQueue.size();
}
Get the average waiting msecs per hard admission job entrance .
33,839
/**
 * Get the admission-wait info for a particular job: its queue position
 * (-1 when unranked), the waiting-queue size, the average hard-admission
 * wait and the hard-admission sample count.
 */
synchronized JobAdmissionWaitInfo getJobAdmissionWaitInfo(JobInProgress job) {
  Integer rank = jobToRank.get(job);
  int position = (rank != null) ? rank : -1;
  float avgWait = getAverageWaitMsecsPerHardAdmissionJob();
  return new JobAdmissionWaitInfo(exceedTaskLimit(), position,
      waitingQueue.size(), avgWait, hardAdmissionMillisQueue.size());
}
Get the job admission wait info for a particular job .
33,840
/**
 * Detect the primary avatar node via ZooKeeper and select the matching
 * remote journal manager (avatar zero or one). Loops until a valid primary
 * URI resolves: an empty address means a failover is in progress and is
 * waited out, a URI matching neither avatar resets and retries, and ZK
 * connection losses are retried up to ZK_CONNECTION_RETRIES before being
 * surfaced (like interrupts and URI syntax errors) as IOException.
 */
protected void detectJournalManager ( ) throws IOException { int failures = 0 ; do { try { Stat stat = new Stat ( ) ; String primaryAddr = zk . getPrimaryAvatarAddress ( logicalName , stat , true , true ) ; if ( primaryAddr == null || primaryAddr . trim ( ) . isEmpty ( ) ) { primaryURI = null ; remoteJournalManager = null ; LOG . warn ( "Failover detected, wait for it to finish..." ) ; failures = 0 ; sleep ( FAILOVER_RETRY_SLEEP ) ; continue ; } primaryURI = addrToURI ( primaryAddr ) ; LOG . info ( "Read primary URI from zk: " + primaryURI ) ; if ( primaryURI . equals ( avatarZeroURI ) ) { remoteJournalManager = remoteJournalManagerZero ; } else if ( primaryURI . equals ( avatarOneURI ) ) { remoteJournalManager = remoteJournalManagerOne ; } else { LOG . warn ( "Invalid primaryURI: " + primaryURI ) ; primaryURI = null ; remoteJournalManager = null ; failures = 0 ; sleep ( FAILOVER_RETRY_SLEEP ) ; } } catch ( KeeperException kex ) { if ( KeeperException . Code . CONNECTIONLOSS == kex . code ( ) && failures < AvatarZooKeeperClient . ZK_CONNECTION_RETRIES ) { failures ++ ; sleep ( FAILOVER_RETRY_SLEEP ) ; continue ; } throwIOException ( kex . getMessage ( ) , kex ) ; } catch ( InterruptedException e ) { throwIOException ( e . getMessage ( ) , e ) ; } catch ( URISyntaxException e ) { throwIOException ( e . getMessage ( ) , e ) ; } } while ( remoteJournalManager == null ) ; }
Detect the primary node and the current Journal Manager.
33,841
/**
 * Grow the underlying byte array to hold at least size bytes. Doubles the
 * current capacity (or jumps straight to size when doubling is not enough);
 * a no-op when the array is already big enough.
 */
private void expandIfNecessary(int size) {
  if (bytes.length < size) {
    final int newCapacity = Math.max(2 * bytes.length, size);
    bytes = Arrays.copyOf(bytes, newCapacity);
  }
}
Expand the underlying byte array to fit size bytes .
33,842
/**
 * Append len bytes from b, starting at off, to the output buffer, growing
 * the backing array first when needed.
 */
public void write(byte b[], int off, int len) {
  expandIfNecessary(count + len);
  System.arraycopy(b, off, bytes, count, len);
  count += len;
}
Writes array of bytes into the output stream .
33,843
/**
 * Start a new line of output: reset all per-inode fields to their defaults
 * and mark that we are inside an inode record.
 */
private void newLine() {
  numBlocks = 0;
  filesize = 0l;
  perms = username = group = path = linkTarget = replication = hardlinkId = "";
  type = INode.INodeType.REGULAR_INODE.toString();
  inInode = true;
}
Start a new line of output; reset values.
33,844
/**
 * Get the input files for the reducer: one per map when reading local map
 * output, otherwise the set of map output files already spilled to disk.
 */
private Path[] getMapFiles(FileSystem fs, boolean isLocal) throws IOException {
  final List<Path> files = new ArrayList<Path>();
  if (isLocal) {
    for (int map = 0; map < numMaps; ++map) {
      files.add(mapOutputFile.getInputFile(map, getTaskID()));
    }
  } else {
    for (FileStatus status : mapOutputFilesOnDisk) {
      files.add(status.getPath());
    }
  }
  return files.toArray(new Path[files.size()]);
}
Get the input files for the reducer .
33,845
/**
 * Return the exponent of the power of two closest to the given positive
 * value. The most significant bit gives the candidate exponent; when the bit
 * directly below it is also set, the value is closer to the next power up.
 *
 * @throws IllegalArgumentException when value is not positive
 */
private static int getClosestPowerOf2(int value) {
  if (value <= 0) {
    throw new IllegalArgumentException("Undefined for " + value);
  }
  final int hob = Integer.highestOneBit(value);
  int exponent = Integer.numberOfTrailingZeros(hob);
  if (((hob >>> 1) & value) != 0) {
    exponent++; // round up: the bit just below the msb is set
  }
  return exponent;
}
Return the exponent of the power of two closest to the given positive value; throws IllegalArgumentException if value <= 0. This follows the observation that the msb of a given value is also the closest power of two, unless the bit following it is set.
33,846
/**
 * Create the znodes used for storing ledger metadata if they do not already
 * exist (both the parent znode and the ledger parent znode, as persistent
 * world-readable nodes). Interrupts and ZooKeeper failures are translated to
 * IOException by the interruptedException/keeperException helpers.
 *
 * @throws IOException if the znodes cannot be ensured
 */
public void init ( ) throws IOException { try { if ( zooKeeper . exists ( zooKeeperParentPath , false ) == null ) { zooKeeper . create ( zooKeeperParentPath , new byte [ ] { '0' } , ZooDefs . Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; LOG . info ( "Created ZNode " + zooKeeperParentPath ) ; } if ( zooKeeper . exists ( ledgerParentPath , false ) == null ) { zooKeeper . create ( ledgerParentPath , new byte [ ] { '0' } , ZooDefs . Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; LOG . info ( "Created ZNode" + ledgerParentPath ) ; } } catch ( InterruptedException e ) { interruptedException ( "Interrupted ensuring that ZNodes " + zooKeeperParentPath + " and " + ledgerParentPath + " exist!" , e ) ; } catch ( KeeperException e ) { keeperException ( "Unrecoverable ZooKeeper error ensuring that ZNodes " + zooKeeperParentPath + " and " + ledgerParentPath + " exist!" , e ) ; } }
Create znodes for storing ledger metadata if they have not been created before
33,847
/**
 * Return the full ZNode path for the ZNode holding the given ledger's
 * metadata, by qualifying the ledger's derived name.
 */
public String fullyQualifiedPathForLedger(EditLogLedgerMetadata e) {
  return fullyQualifiedPathForLedger(nameForLedger(e));
}
Return the full ZNode path for a ZNode corresponding to a specific ledger's metadata.
33,848
/**
 * Remove the ledger's metadata znode from ZooKeeper; the ledger itself is
 * not deleted. A missing znode is tolerated (logged, returns false); a
 * version mismatch or other ZooKeeper error is translated to IOException by
 * the keeperException/interruptedException helpers.
 *
 * @param version expected znode version for the conditional delete
 * @return true when the znode was deleted, false when it did not exist
 * @throws IOException on version mismatch, other ZK errors, or interruption
 */
public boolean deleteLedgerMetadata ( EditLogLedgerMetadata ledger , int version ) throws IOException { String ledgerPath = fullyQualifiedPathForLedger ( ledger ) ; try { zooKeeper . delete ( ledgerPath , version ) ; return true ; } catch ( KeeperException . NoNodeException e ) { LOG . warn ( ledgerPath + " does not exist. Returning false, ignoring " + e ) ; } catch ( KeeperException . BadVersionException e ) { keeperException ( "Unable to delete " + ledgerPath + ", version does not match." + " Updated by another process?" , e ) ; } catch ( KeeperException e ) { keeperException ( "Unrecoverable ZooKeeper error deleting " + ledgerPath , e ) ; } catch ( InterruptedException e ) { interruptedException ( "Interrupted deleting " + ledgerPath , e ) ; } return false ; }
Removes ledger - related Metadata from BookKeeper . Does not delete the ledger itself .
33,849
/**
 * Verify that the given EditLogLedgerMetadata equals the metadata stored at
 * the specified ZNode path.
 *
 * @return true iff metadata stored at fullPathToVerify exists and equals the
 *         given metadata; false on missing metadata or read error
 */
public boolean verifyEditLogLedgerMetadata(EditLogLedgerMetadata metadata, String fullPathToVerify) {
  Preconditions.checkNotNull(metadata);
  try {
    EditLogLedgerMetadata otherMetadata = readEditLogLedgerMetadata(fullPathToVerify);
    if (otherMetadata == null) {
      // fix: previously fell through to metadata.equals(null) (always false)
      // and emitted a misleading "Verifying null ..." trace after the warning
      LOG.warn("No metadata found " + fullPathToVerify + "!");
      return false;
    }
    if (LOG.isTraceEnabled()) {
      LOG.trace("Verifying " + otherMetadata + " read from " + fullPathToVerify + " against " + metadata);
    }
    return metadata.equals(otherMetadata);
  } catch (IOException e) {
    LOG.error("Unrecoverable error when verifying " + fullPathToVerify, e);
    return false;
  }
}
Verify that the specified EditLogLedgerMetadata instance is the same as the EditLogLedgerMetadata object stored in the specified ZNode path .
33,850
/**
 * List all ledgers registered under this instance's ZooKeeper namespace,
 * sorted (TreeSet ordering).
 *
 * @param includeInProgressLedgers if false, ledger names containing the
 *        in-progress marker are skipped; corrupt-marked names are always
 *        skipped
 * @return sorted set of readable ledger metadata; entries whose metadata
 *         ZNode has disappeared (e.g. finalized and deleted) are omitted
 *         with a warning
 * @throws IOException on ZooKeeper error or interruption (via the
 *         keeperException/interruptedException helpers)
 */
public Collection < EditLogLedgerMetadata > listLedgers ( boolean includeInProgressLedgers ) throws IOException { TreeSet < EditLogLedgerMetadata > ledgers = new TreeSet < EditLogLedgerMetadata > ( ) ; try { List < String > ledgerNames = zooKeeper . getChildren ( ledgerParentPath , false ) ; for ( String ledgerName : ledgerNames ) { if ( ledgerName . endsWith ( BKJM_EDIT_CORRUPT ) ) { continue ; } if ( ! includeInProgressLedgers && ledgerName . contains ( BKJM_EDIT_INPROGRESS ) ) { continue ; } String fullLedgerMetadataPath = fullyQualifiedPathForLedger ( ledgerName ) ; EditLogLedgerMetadata metadata = readEditLogLedgerMetadata ( fullLedgerMetadataPath ) ; if ( metadata != null ) { if ( LOG . isTraceEnabled ( ) ) { LOG . trace ( "Read " + metadata + " from " + fullLedgerMetadataPath ) ; } ledgers . add ( metadata ) ; } else { LOG . warn ( "ZNode " + fullLedgerMetadataPath + " might have been finalized and deleted." ) ; } } } catch ( InterruptedException e ) { interruptedException ( "Interrupted listing ledgers under " + ledgerParentPath , e ) ; } catch ( KeeperException e ) { keeperException ( "Unrecoverable ZooKeeper error listing ledgers " + "under " + ledgerParentPath , e ) ; } return ledgers ; }
List all ledgers in this instance s ZooKeeper namespace .
33,851
/**
 * Merge namespace data from source directories into destination directories
 * using one MergeThread per directory pair.
 *
 * Destinations that already exist are skipped; sources that are missing or
 * not directories are skipped with an info log. If nothing is left to merge,
 * returns false. NOTE(review): the subsequent size check throws whenever any
 * pair was skipped for either reason — presumably to force a consistent
 * all-or-nothing merge; confirm this is the intended semantics.
 *
 * @param srcDataDirs source data directory paths, positionally aligned
 *        with dstDataDirs (iteration order of the collection)
 * @param dstDataDirs destination namespace data directories
 * @return true if all directories were merged; false if nothing needed merging
 * @throws IOException if any pair could not be paired up, a merge thread
 *         failed, or the join was interrupted
 */
boolean doMerge ( String [ ] srcDataDirs , Collection < File > dstDataDirs , int namespaceId , NamespaceInfo nsInfo , StartupOption startOpt ) throws IOException { HashMap < File , File > dirsToMerge = new HashMap < File , File > ( ) ; int i = 0 ; for ( Iterator < File > it = dstDataDirs . iterator ( ) ; it . hasNext ( ) ; i ++ ) { File dstDataDir = it . next ( ) ; if ( dstDataDir . exists ( ) ) { continue ; } File srcDataDir = NameSpaceSliceStorage . getNsRoot ( namespaceId , new File ( srcDataDirs [ i ] , STORAGE_DIR_CURRENT ) ) ; if ( ! srcDataDir . exists ( ) || ! srcDataDir . isDirectory ( ) ) { LOG . info ( "Source data directory " + srcDataDir + " doesn't exist." ) ; continue ; } dirsToMerge . put ( srcDataDir , dstDataDir ) ; } if ( dirsToMerge . size ( ) == 0 ) return false ; if ( dirsToMerge . size ( ) != dstDataDirs . size ( ) ) { throw new IOException ( "Merge fail: not all directories are merged successfully." ) ; } MergeThread [ ] mergeThreads = new MergeThread [ dirsToMerge . size ( ) ] ; i = 0 ; for ( Map . Entry < File , File > entry : dirsToMerge . entrySet ( ) ) { MergeThread thread = new MergeThread ( entry . getKey ( ) , entry . getValue ( ) , nsInfo ) ; thread . start ( ) ; mergeThreads [ i ] = thread ; i ++ ; } for ( MergeThread thread : mergeThreads ) { try { thread . join ( ) ; } catch ( InterruptedException e ) { throw ( InterruptedIOException ) new InterruptedIOException ( ) . initCause ( e ) ; } } for ( MergeThread thread : mergeThreads ) { if ( thread . error != null ) throw new IOException ( thread . error ) ; } return true ; }
Merge the data directories from srcDataDirs to dstDataDirs.
33,852
/**
 * Perform recoverTransitionRead for a specific namespace: resolve the
 * per-namespace slice root under each data directory, optionally merge
 * configured legacy directories ("dfs.merge.data.dir.&lt;nameserviceId&gt;",
 * REGULAR startup only), create the slice directories when no merge
 * happened, then recover/transition the slice storage and register it.
 *
 * @param nameserviceId used to look up merge directories; may be null,
 *        in which case no merge is attempted
 * @throws IOException on merge failure or storage recovery failure
 */
void recoverTransitionRead ( DataNode datanode , int namespaceId , NamespaceInfo nsInfo , Collection < File > dataDirs , StartupOption startOpt , String nameserviceId ) throws IOException { Collection < File > nsDataDirs = new ArrayList < File > ( ) ; for ( Iterator < File > it = dataDirs . iterator ( ) ; it . hasNext ( ) ; ) { File dnRoot = it . next ( ) ; File nsRoot = NameSpaceSliceStorage . getNsRoot ( namespaceId , new File ( dnRoot , STORAGE_DIR_CURRENT ) ) ; nsDataDirs . add ( nsRoot ) ; } boolean merged = false ; String [ ] mergeDataDirs = nameserviceId == null ? null : datanode . getConf ( ) . getStrings ( "dfs.merge.data.dir." + nameserviceId ) ; if ( startOpt . equals ( StartupOption . REGULAR ) && mergeDataDirs != null && mergeDataDirs . length > 0 ) { assert mergeDataDirs . length == dataDirs . size ( ) ; merged = doMerge ( mergeDataDirs , nsDataDirs , namespaceId , nsInfo , startOpt ) ; } if ( ! merged ) { makeNameSpaceDataDir ( nsDataDirs ) ; } NameSpaceSliceStorage nsStorage = new NameSpaceSliceStorage ( namespaceId , this . getCTime ( ) , layoutMap ) ; nsStorage . recoverTransitionRead ( datanode , nsInfo , nsDataDirs , startOpt ) ; addNameSpaceStorage ( namespaceId , nsStorage ) ; }
recoverTransitionRead for a specific Name Space
33,853
/**
 * Create the physical namespace data directories on the datanode.
 * A directory that fails the disk check is logged as a warning and
 * skipped rather than aborting the whole operation.
 */
public static void makeNameSpaceDataDir(Collection<File> dataDirs)
    throws IOException {
  for (File dir : dataDirs) {
    try {
      DiskChecker.checkDir(dir);
    } catch (IOException ioe) {
      LOG.warn("Invalid directory in: " + dir.getCanonicalPath() + ": "
          + ioe.getMessage());
    }
  }
}
Create physical directory for Name Spaces on the data node
33,854
/**
 * Analyze which storage directories require a state transition and perform
 * it if necessary: handles ROLLBACK, validates layout version and
 * namespaceID compatibility (pre-federation layouts only), skips
 * already-current directories, collects directories needing upgrade, and
 * finally runs doUpgrade on them. Throws when a directory's state is newer
 * than the namespace state or its NS-level directory was already upgraded.
 *
 * @throws IOException on incompatible namespaceID, future/newer layout,
 *         or already-upgraded NS-level directory
 */
private void doTransition ( List < StorageDirectory > sds , NamespaceInfo nsInfo , StartupOption startOpt ) throws IOException { if ( startOpt == StartupOption . ROLLBACK ) doRollback ( nsInfo ) ; int numOfDirs = sds . size ( ) ; List < StorageDirectory > dirsToUpgrade = new ArrayList < StorageDirectory > ( numOfDirs ) ; List < StorageInfo > dirsInfo = new ArrayList < StorageInfo > ( numOfDirs ) ; for ( StorageDirectory sd : sds ) { sd . read ( ) ; layoutMap . put ( sd . getRoot ( ) , this . layoutVersion ) ; checkVersionUpgradable ( this . layoutVersion ) ; assert this . layoutVersion >= FSConstants . LAYOUT_VERSION : "Future version is not allowed" ; boolean federationSupported = this . layoutVersion <= FSConstants . FEDERATION_VERSION ; if ( ! federationSupported && getNamespaceID ( ) != nsInfo . getNamespaceID ( ) ) { sd . unlock ( ) ; throw new IOException ( "Incompatible namespaceIDs in " + sd . getRoot ( ) . getCanonicalPath ( ) + ": namenode namespaceID = " + nsInfo . getNamespaceID ( ) + "; datanode namespaceID = " + getNamespaceID ( ) ) ; } if ( this . layoutVersion == FSConstants . LAYOUT_VERSION && this . cTime == nsInfo . getCTime ( ) ) continue ; verifyDistributedUpgradeProgress ( nsInfo ) ; if ( this . layoutVersion > FSConstants . LAYOUT_VERSION && this . layoutVersion > FSConstants . FEDERATION_VERSION ) { if ( isNsLevelUpgraded ( getNamespaceID ( ) , sd ) ) { throw new IOException ( "Ns level directory already upgraded for : " + sd . getRoot ( ) + " ignoring upgrade" ) ; } dirsToUpgrade . add ( sd ) ; dirsInfo . add ( new StorageInfo ( this ) ) ; continue ; } if ( this . cTime >= nsInfo . getCTime ( ) ) { sd . unlock ( ) ; throw new IOException ( "Datanode state: LV = " + this . getLayoutVersion ( ) + " CTime = " + this . getCTime ( ) + " is newer than the namespace state: LV = " + nsInfo . getLayoutVersion ( ) + " CTime = " + nsInfo . getCTime ( ) ) ; } } if ( ! dirsToUpgrade . isEmpty ( ) ) { doUpgrade ( dirsToUpgrade , dirsInfo , nsInfo ) ; } }
Analyze which and whether a transition of the fs state is required and perform it if necessary .
33,855
/**
 * Register nsStorage in nsStorageMap under the given namespace id.
 * An existing entry for the id is kept; the new storage is ignored.
 */
private void addNameSpaceStorage(int nsID, NameSpaceSliceStorage nsStorage)
    throws IOException {
  boolean alreadyRegistered = this.nsStorageMap.containsKey(nsID);
  if (!alreadyRegistered) {
    this.nsStorageMap.put(nsID, nsStorage);
  }
}
Add nsStorage into nsStorageMap
33,856
/**
 * Check whether this exact node instance is already registered in the map.
 * Comparison is by reference identity, performed under the read lock.
 *
 * @return true if the same instance is present; false for null or absent
 */
boolean contains(DatanodeDescriptor node) {
  if (node == null) {
    return false;
  }
  String host = node.getHost();
  hostmapLock.readLock().lock();
  try {
    DatanodeDescriptor[] sameHostNodes = map.get(host);
    if (sameHostNodes == null) {
      return false;
    }
    for (DatanodeDescriptor candidate : sameHostNodes) {
      if (candidate == node) {
        return true;
      }
    }
    return false;
  } finally {
    hostmapLock.readLock().unlock();
  }
}
Check if node is already in the map .
33,857
/**
 * Add node to the host-keyed map under the write lock.
 *
 * @return true if the node was added; false if it is null or was
 *         already present (reference identity, via contains)
 */
boolean add(DatanodeDescriptor node) {
  hostmapLock.writeLock().lock();
  try {
    if (node == null || contains(node)) {
      return false;
    }
    String host = node.getHost();
    DatanodeDescriptor[] existing = map.get(host);
    DatanodeDescriptor[] updated;
    if (existing == null) {
      updated = new DatanodeDescriptor[] { node };
    } else {
      // Grow by one and append the new node at the end.
      updated = new DatanodeDescriptor[existing.length + 1];
      System.arraycopy(existing, 0, updated, 0, existing.length);
      updated[existing.length] = node;
    }
    map.put(host, updated);
    return true;
  } finally {
    hostmapLock.writeLock().unlock();
  }
}
Add node to the map. Return true if the node is added; false otherwise.
33,858
/**
 * Remove node from the host-keyed map under the write lock. Matching is
 * by reference identity. A single-element array is removed entirely;
 * otherwise the array is compacted around the removed slot.
 *
 * @return true if the node was removed; false if null or not present
 */
boolean remove ( DatanodeDescriptor node ) { if ( node == null ) { return false ; } String host = node . getHost ( ) ; hostmapLock . writeLock ( ) . lock ( ) ; try { DatanodeDescriptor [ ] nodes = map . get ( host ) ; if ( nodes == null ) { return false ; } if ( nodes . length == 1 ) { if ( nodes [ 0 ] == node ) { map . remove ( host ) ; return true ; } else { return false ; } } int i = 0 ; for ( ; i < nodes . length ; i ++ ) { if ( nodes [ i ] == node ) { break ; } } if ( i == nodes . length ) { return false ; } else { DatanodeDescriptor [ ] newNodes ; newNodes = new DatanodeDescriptor [ nodes . length - 1 ] ; System . arraycopy ( nodes , 0 , newNodes , 0 , i ) ; System . arraycopy ( nodes , i + 1 , newNodes , i , nodes . length - i - 1 ) ; map . put ( host , newNodes ) ; return true ; } } finally { hostmapLock . writeLock ( ) . unlock ( ) ; } }
Remove node from the map. Return true if the node is removed; false otherwise.
33,859
/**
 * Find a datanode by its name ("host" or "host:port"): the host part
 * selects the bucket, then the full name must match exactly. Performed
 * under the read lock.
 *
 * @return the matching descriptor, or null if name is null or not found
 */
public DatanodeDescriptor getDatanodeByName(String name) {
  if (name == null) {
    return null;
  }
  int colon = name.indexOf(":");
  String host = (colon < 0) ? name : name.substring(0, colon);
  hostmapLock.readLock().lock();
  try {
    DatanodeDescriptor[] candidates = map.get(host);
    if (candidates != null) {
      for (DatanodeDescriptor candidate : candidates) {
        if (name.equals(candidate.getName())) {
          return candidate;
        }
      }
    }
    return null;
  } finally {
    hostmapLock.readLock().unlock();
  }
}
Find data node by its name .
33,860
/**
 * This reducer must never be invoked.
 *
 * @throws IOException always
 */
public void reduce(Text arg0, Iterator<Text> arg1,
    OutputCollector<Text, Text> arg2, Reporter arg3) throws IOException {
  throw new IOException("should not be called\n");
}
Do nothing . Should not be called .
33,861
/**
 * Get aggregated TaskError counts over the sliding-window snapshots whose
 * start time falls within the last {@code timeWindow} milliseconds.
 *
 * @param timeWindow look-back window in milliseconds
 * @return map from TaskError to its total count within the window
 */
public synchronized Map<TaskError, Integer> getRecentErrorCounts(
    long timeWindow) {
  long start = System.currentTimeMillis() - timeWindow;
  Map<TaskError, Integer> errorCounts = createErrorCountsMap();
  Iterator<Map<TaskError, Integer>> errorCountsIter =
      errorCountsQueue.iterator();
  Iterator<Long> startTimeIter = startTimeQueue.iterator();
  // Guard both iterators: the two queues are expected to advance in lock
  // step, but a length mismatch must not throw NoSuchElementException.
  while (errorCountsIter.hasNext() && startTimeIter.hasNext()
      && start < startTimeIter.next()) {
    Map<TaskError, Integer> windowErrorCounts = errorCountsIter.next();
    for (Map.Entry<TaskError, Integer> entry
        : windowErrorCounts.entrySet()) {
      Integer current = errorCounts.get(entry.getKey());
      // Tolerate a key absent from the freshly created counts map.
      int base = (current == null) ? 0 : current;
      errorCounts.put(entry.getKey(), base + entry.getValue());
    }
  }
  return errorCounts;
}
Get recent TaskError counts within the given window
33,862
/**
 * Parse the error-definition XML at the given URL into named TaskError
 * entries. Each &lt;error name="..."&gt; element may contain &lt;pattern&gt;
 * and &lt;description&gt; children; missing children default to the empty
 * string. Insertion order is preserved (LinkedHashMap). Parse failures are
 * logged and yield whatever was parsed so far.
 *
 * @param configURL location of the XML config
 * @return map from error name to TaskError, possibly empty on error
 */
private Map < String , TaskError > parseConfigFile ( URL configURL ) { Map < String , TaskError > knownErrors = new LinkedHashMap < String , TaskError > ( ) ; try { Element root = getRootElement ( configURL ) ; NodeList elements = root . getChildNodes ( ) ; for ( int i = 0 ; i < elements . getLength ( ) ; ++ i ) { Node node = elements . item ( i ) ; if ( ! ( node instanceof Element ) ) { continue ; } Element element = ( Element ) node ; if ( matched ( element , "error" ) ) { String name = element . getAttribute ( "name" ) ; String pattern = "" ; String description = "" ; NodeList fields = element . getChildNodes ( ) ; for ( int j = 0 ; j < fields . getLength ( ) ; ++ j ) { Node fieldNode = fields . item ( j ) ; if ( ! ( fieldNode instanceof Element ) ) { continue ; } Element field = ( Element ) fieldNode ; if ( matched ( field , "pattern" ) ) { pattern = getText ( field ) ; } else if ( matched ( field , "description" ) ) { description = getText ( field ) ; } } TaskError taskError = new TaskError ( name , pattern , description ) ; LOG . info ( "Adding TaskError " + taskError ) ; knownErrors . put ( name , taskError ) ; } } } catch ( IOException ie ) { LOG . error ( "Error parsing config file " + configURL , ie ) ; } return knownErrors ; }
Parse the error XML config file which contains the known error definitions.
33,863
/**
 * Return this grant and request a replacement one, e.g. when the task
 * failed, was killed, or the job tracker decided the resource is bad.
 * If the task no longer needs a resource (not runnable, or running
 * without being speculated, and no cleanup task pending) the grant is
 * simply released. When abandonHost is set, the grant's host is added to
 * the task's excluded hosts so the new request avoids it.
 * All bookkeeping happens under lockObject.
 */
public void processBadResource ( int grant , boolean abandonHost ) { synchronized ( lockObject ) { Set < String > excludedHosts = null ; TaskInProgress tip = requestToTipMap . get ( grant ) ; if ( ! job . canLaunchJobCleanupTask ( ) && ( ! tip . isRunnable ( ) || ( tip . isRunning ( ) && ! ( speculatedMaps . contains ( tip ) || speculatedReduces . contains ( tip ) ) ) ) ) { resourceTracker . releaseResource ( grant ) ; return ; } if ( abandonHost ) { ResourceGrant resource = resourceTracker . getGrant ( grant ) ; String hostToExlcude = resource . getAddress ( ) . getHost ( ) ; taskToContextMap . get ( tip ) . excludedHosts . add ( hostToExlcude ) ; excludedHosts = taskToContextMap . get ( tip ) . excludedHosts ; } ResourceRequest newReq = resourceTracker . releaseAndRequestResource ( grant , excludedHosts ) ; requestToTipMap . put ( newReq . getId ( ) , tip ) ; TaskContext context = taskToContextMap . get ( tip ) ; if ( context == null ) { context = new TaskContext ( newReq ) ; } else { context . resourceRequests . add ( newReq ) ; } taskToContextMap . put ( tip , context ) ; }  }
Return this grant and request a different one. This can happen because the task has failed, was killed, or the job tracker decided that the resource is bad.
33,864
/**
 * Update job and task state from a TaskTracker's status report: tags each
 * task report with the tracker name, applies it via updateTaskStatus, and
 * processes fetch failures.
 * NOTE(review): setupReduceRequests(job) is invoked once per task report
 * inside the loop rather than once per heartbeat — confirm this is
 * intentional (it looks like it could sit outside the loop).
 */
private void updateTaskStatuses ( TaskTrackerStatus status ) { TaskTrackerInfo trackerInfo = TaskTrackerInfo . fromStatus ( status ) ; String trackerName = status . getTrackerName ( ) ; for ( TaskStatus report : status . getTaskReports ( ) ) { report . setTaskTracker ( trackerName ) ; LOG . debug ( "Task status report: " + report ) ; updateTaskStatus ( trackerInfo , report ) ; setupReduceRequests ( job ) ; processFetchFailures ( report ) ; } }
Updates job and tasks state according to report from TaskTracker
33,865
/**
 * Apply a single TaskStatus report to internal job state only; no actions
 * are issued from here. Reports for other jobs or unknown TIPs are
 * ignored. Expiry tracking: a task leaves the expire set once it is no
 * longer UNASSIGNED, and its timer is refreshed while RUNNING (unless the
 * last known state was already terminating). The report is cloned before
 * being handed to the job.
 */
private void updateTaskStatus ( TaskTrackerInfo info , TaskStatus report ) { TaskAttemptID taskId = report . getTaskID ( ) ; if ( ! this . jobId . equals ( taskId . getJobID ( ) ) ) { LOG . warn ( "Task " + taskId + " belongs to unknown job " + taskId . getJobID ( ) ) ; return ; } TaskInProgress tip = taskLookupTable . getTIP ( taskId ) ; if ( tip == null ) { return ; } TaskStatus status = tip . getTaskStatus ( taskId ) ; TaskStatus . State knownState = ( status == null ) ? null : status . getRunState ( ) ; if ( report . getRunState ( ) != TaskStatus . State . UNASSIGNED ) { expireTasks . removeTask ( taskId ) ; } if ( report . getRunState ( ) == TaskStatus . State . RUNNING && ! TaskStatus . TERMINATING_STATES . contains ( knownState ) ) { expireTasks . updateTask ( taskId ) ; } job . updateTaskStatus ( tip , ( TaskStatus ) report . clone ( ) , info ) ; }
Updates job and tasks state according to TaskStatus from given TaskTracker . This function only updates internal job state it shall NOT issue any actions directly .
33,866
/**
 * Record a new resource request for the given tip in the local maps only;
 * the resource tracker itself is not updated here.
 */
private void saveNewRequestForTip(TaskInProgress tip, ResourceRequest req) {
  requestToTipMap.put(req.getId(), tip);
  TaskContext context = taskToContextMap.get(tip);
  if (context != null) {
    context.resourceRequests.add(req);
  } else {
    context = new TaskContext(req);
  }
  taskToContextMap.put(tip, context);
}
Saves new request for given tip no recording in resource tracker happens
33,867
/**
 * Returns the JobID for the single job this CoronaJobTracker can run; the
 * id is derived from the session id created here.
 *
 * @return the job id for this tracker's one job
 * @throws RuntimeException if called more than once (only one job allowed)
 * @throws IOException if creating the session fails
 */
public JobID getNewJobId ( ) throws IOException { int value = jobCounter . incrementAndGet ( ) ; if ( value > 1 ) { throw new RuntimeException ( "CoronaJobTracker can only run one job! (value=" + value + ")" ) ; } createSession ( ) ; jobId = jobIdFromSessionId ( sessionId ) ; return jobId ; }
Returns a unique JobID for a new job. CoronaJobTracker can only run a single job, and its id is fixed a priori.
33,868
/**
 * Dispatch commit-task actions after obtaining commit permission. For each
 * action: any previously committing attempt returned by the permission
 * authority is failed ("Unknown committing attempt"); the new attempt is
 * then dispatched to its tracker unless it equals the old one (repeat
 * request, logged and skipped). If the permission client itself fails,
 * this job tracker is closed and the error rethrown.
 *
 * @throws IOException on permission-client failure or dispatch failure
 */
private void dispatchCommitActions ( List < CommitTaskAction > commitActions ) throws IOException { if ( ! commitActions . isEmpty ( ) ) { TaskAttemptID [ ] wasCommitting ; try { wasCommitting = commitPermissionClient . getAndSetCommitting ( commitActions ) ; } catch ( IOException e ) { LOG . error ( "Commit permission client is faulty - killing this JT" ) ; try { close ( false ) ; } catch ( InterruptedException e1 ) { throw new IOException ( e1 ) ; } throw e ; } int i = 0 ; for ( CommitTaskAction action : commitActions ) { TaskAttemptID oldCommitting = wasCommitting [ i ] ; if ( oldCommitting != null ) { failTask ( oldCommitting , "Unknown committing attempt" , false ) ; } TaskAttemptID newToCommit = action . getTaskID ( ) ; if ( ! newToCommit . equals ( oldCommitting ) ) { String trackerName = taskLookupTable . getAssignedTracker ( newToCommit ) ; taskLauncher . commitTask ( trackerName , resourceTracker . getTrackerAddr ( trackerName ) , action ) ; } else { LOG . warn ( "Repeated try to commit same attempt id. Ignoring" ) ; } ++ i ; } } }
Executes actions that can be executed after asking commit permission authority
33,869
/**
 * Check whether the given job id matches this job tracker's job id.
 * In standalone mode the ids are compared directly; otherwise the
 * argument's main job id is used for the comparison.
 */
private boolean isMatchingJobId(JobID jobId) {
  JobID candidate = isStandalone ? jobId : getMainJobID(jobId);
  return this.jobId.equals(candidate);
}
Check if given id matches job id of this JT .
33,870
/**
 * Handle a task whose launch timed out: record the timeout against its
 * tracker, submit a TaskTimeout to the local JT submitter, and fail the
 * attempt. All under lockObject.
 */
public void expiredLaunchingTask(TaskAttemptID taskId) {
  synchronized (lockObject) {
    String tracker = taskLookupTable.getAssignedTracker(taskId);
    trackerStats.recordTimeout(tracker);
    localJTSubmitter.submit(new TaskTimeout(tracker));
    failTask(taskId, "Error launching task", false);
  }
}
Handle a task that could not be launched .
33,871
/**
 * Prepare the remote job tracker for failover: stop purging the job and,
 * when heartbeating to a parent, stop the inter-tracker RPC server.
 * No-op when JT restarting is disabled in the configuration.
 */
public void prepareFailover() {
  if (!RemoteJTProxy.isJTRestartingEnabled(conf)) {
    return;
  }
  this.isPurgingJob = false;
  if (this.parentHeartbeat != null) {
    this.interTrackerServer.stop();
  }
  // Log completion only after the preparation steps have actually run
  // (the original logged "done" before doing the work).
  LOG.info("prepareFailover done");
}
Preparation work needed by the remote job tracker for failover.
33,872
/**
 * Run with the stored argument vector.
 *
 * @return the exit code from {@code run(argv_)}
 * @throws IOException wrapping any exception thrown while running; the
 *         original exception is preserved as the cause
 */
public int go() throws IOException {
  try {
    return run(argv_);
  } catch (Exception ex) {
    // Keep the original exception as the cause instead of discarding it.
    throw new IOException(ex.getMessage(), ex);
  }
}
This is the method that actually initializes the job conf and submits the job to the jobtracker
33,873
/**
 * Prints the JobConf properties (sorted by key for stable output) via
 * msg(); shown on stdout when verbose is specified.
 */
protected void listJobConfProperties() {
  msg("==== JobConf properties:");
  // Use a typed TreeMap instead of raw types; sorting by key keeps the
  // printed output deterministic.
  TreeMap<Object, Object> sorted = new TreeMap<Object, Object>();
  Iterator<?> it = jobConf_.iterator();
  while (it.hasNext()) {
    Map.Entry<?, ?> en = (Map.Entry<?, ?>) it.next();
    sorted.put(en.getKey(), en.getValue());
  }
  for (Map.Entry<Object, Object> en : sorted.entrySet()) {
    msg(en.getKey() + "=" + en.getValue());
  }
  msg("====");
}
Prints out the jobconf properties on stdout when verbose is specified .
33,874
/**
 * Submit the streaming job and poll it to completion (based on JobClient).
 * When running against local Hadoop with a jar, the jar is first unpacked
 * into the working directory. Progress is logged once per second whenever
 * the map/reduce percentages change.
 *
 * @return 0 on success; 1 job unsuccessful; 2 bad input path;
 *         3 invalid job conf; 4 output path exists; 5 other IO error
 * @throws IOException from job submission/monitoring plumbing
 */
public int submitAndMonitorJob ( ) throws IOException { if ( jar_ != null && isLocalHadoop ( ) ) { File wd = new File ( "." ) . getAbsoluteFile ( ) ; StreamUtil . unJar ( new File ( jar_ ) , wd ) ; } jc_ = new JobClient ( jobConf_ ) ; boolean error = true ; running_ = null ; String lastReport = null ; try { running_ = jc_ . submitJob ( jobConf_ ) ; jobId_ = running_ . getID ( ) ; LOG . info ( "getLocalDirs(): " + Arrays . asList ( jobConf_ . getLocalDirs ( ) ) ) ; LOG . info ( "Running job: " + jobId_ ) ; jobInfo ( ) ; while ( ! running_ . isComplete ( ) ) { try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { } running_ = jc_ . getJob ( jobId_ ) ; String report = null ; report = " map " + Math . round ( running_ . mapProgress ( ) * 100 ) + "% reduce " + Math . round ( running_ . reduceProgress ( ) * 100 ) + "%" ; if ( ! report . equals ( lastReport ) ) { LOG . info ( report ) ; lastReport = report ; } } if ( ! running_ . isSuccessful ( ) ) { jobInfo ( ) ; LOG . error ( "Job not Successful!" ) ; return 1 ; } LOG . info ( "Job complete: " + jobId_ ) ; LOG . info ( "Output: " + output_ ) ; error = false ; } catch ( FileNotFoundException fe ) { LOG . error ( "Error launching job , bad input path : " + fe . getMessage ( ) ) ; return 2 ; } catch ( InvalidJobConfException je ) { LOG . error ( "Error launching job , Invalid job conf : " + je . getMessage ( ) ) ; return 3 ; } catch ( FileAlreadyExistsException fae ) { LOG . error ( "Error launching job , Output path already exists : " + fae . getMessage ( ) ) ; return 4 ; } catch ( IOException ioe ) { LOG . error ( "Error Launching job : " + ioe . getMessage ( ) ) ; return 5 ; } finally { if ( error && ( running_ != null ) ) { LOG . info ( "killJob..." ) ; running_ . killJob ( ) ; } jc_ . close ( ) ; } return 0 ; }
Based on JobClient
33,875
/**
 * If the last outstanding packet has waited longer than packetTimeout
 * for its ack, log a warning. Detection only — no recovery is attempted.
 */
private void checkIfLastPacketTimeout() {
  synchronized (ackQueue) {
    if (ackQueue.isEmpty()) {
      return;
    }
    long waited = System.currentTimeMillis() - lastPacketSentTime;
    if (waited > packetTimeout) {
      DFSClient.LOG.warn("Packet " + ackQueue.getLast().seqno + " of "
          + block + " is timed out");
    }
  }
}
Check if the last outstanding packet has not received an ack before it is timed out . If true for now just log it . We will provide a decent solution to this later on .
33,876
/**
 * Set up the append pipeline for the last block. With no available
 * datanodes the stream is aborted (exception recorded, stream closed) and
 * false is returned. On a successful pipeline the namenode is told to
 * update the pipeline from the old generation stamp to the new block. On
 * failure, falls back to the block recovery process (retrying
 * processDatanodeError once a second); the pipeline may shrink if
 * datanodes die during the process.
 *
 * @param lastBlock the last located block, carrying the old generation
 *        stamp (LocatedBlockWithOldGS)
 * @return true if the pipeline was created directly; false otherwise
 */
private boolean setupPipelineForAppend ( LocatedBlock lastBlock ) throws IOException { if ( nodes == null || nodes . length == 0 ) { String msg = "Could not get block locations. " + "Source file \"" + src + "\" - Aborting..." ; DFSClient . LOG . warn ( msg ) ; setLastException ( new IOException ( msg ) ) ; closed = true ; if ( streamer != null ) streamer . close ( ) ; return false ; } boolean success = createBlockOutputStream ( nodes , dfsClient . clientName , false , true ) ; long oldGenerationStamp = ( ( LocatedBlockWithOldGS ) lastBlock ) . getOldGenerationStamp ( ) ; if ( success ) { Block newBlock = lastBlock . getBlock ( ) ; Block oldBlock = new Block ( newBlock . getBlockId ( ) , newBlock . getNumBytes ( ) , oldGenerationStamp ) ; dfsClient . namenode . updatePipeline ( dfsClient . clientName , oldBlock , newBlock , nodes ) ; } else { DFSClient . LOG . warn ( "Fall back to block recovery process when trying" + " to setup the append pipeline for file " + src ) ; block . setGenerationStamp ( oldGenerationStamp ) ; while ( processDatanodeError ( true , true ) ) { try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { lastException = new IOException ( e ) ; break ; } } } return success ; }
Setup the Append pipeline the length of current pipeline will shrink if any datanodes are dead during the process .
33,877
/**
 * Open a DataOutputStream to a DataNode for a newly allocated block.
 * Allocates a block from the namenode and tries to build the pipeline, up
 * to "dfs.client.block.write.retries" (default 3) retries; each failed
 * attempt abandons the block and excludes the failing datanode. After the
 * retries are exhausted, falls back to shrinking the pipeline by dropping
 * the failing node (errorIndex) until it succeeds or fewer than two nodes
 * remain.
 *
 * @return the datanodes forming the established pipeline
 * @throws IOException if no pipeline could be created
 */
private DatanodeInfo [ ] nextBlockOutputStream ( String client ) throws IOException { LocatedBlock lb = null ; boolean retry = false ; DatanodeInfo [ ] nodes ; ArrayList < DatanodeInfo > excludedNodes = new ArrayList < DatanodeInfo > ( ) ; int count = dfsClient . conf . getInt ( "dfs.client.block.write.retries" , 3 ) ; boolean success ; do { hasError = false ; lastException = null ; errorIndex = 0 ; retry = false ; nodes = null ; success = false ; long startTime = System . currentTimeMillis ( ) ; DatanodeInfo [ ] excluded = excludedNodes . toArray ( new DatanodeInfo [ 0 ] ) ; lb = locateFollowingBlock ( startTime , excluded . length > 0 ? excluded : null ) ; block = lb . getBlock ( ) ; nodes = lb . getLocations ( ) ; success = createBlockOutputStream ( nodes , dfsClient . clientName , false , false ) ; if ( ! success ) { DFSClient . LOG . info ( "Abandoning block " + block + " for file " + src ) ; dfsClient . namenode . abandonBlock ( block , src , dfsClient . clientName ) ; if ( errorIndex < nodes . length ) { DFSClient . LOG . debug ( "Excluding datanode " + nodes [ errorIndex ] ) ; excludedNodes . add ( nodes [ errorIndex ] ) ; } retry = true ; } } while ( retry && -- count >= 0 ) ; if ( ! success && nodes != null ) { while ( nodes . length > 1 && ! success ) { if ( errorIndex >= nodes . length ) { break ; } DatanodeInfo [ ] remainingNodes = new DatanodeInfo [ nodes . length - 1 ] ; for ( int i = 0 ; i < errorIndex ; i ++ ) { remainingNodes [ i ] = nodes [ i ] ; } for ( int i = errorIndex + 1 ; i < nodes . length ; i ++ ) { remainingNodes [ i - 1 ] = nodes [ i ] ; } nodes = remainingNodes ; success = createBlockOutputStream ( nodes , dfsClient . clientName , false , false ) ; } } if ( ! success ) { throw new IOException ( "Unable to create new block." ) ; } return nodes ; }
Open a DataOutputStream to a DataNode so that it can be written to . This happens when a file is created and each time a new block is allocated . Must get block ID and the IDs of the destinations from the namenode . Returns the list of target datanodes .
33,878
/**
 * Push all written data out to the datanodes and wait for acks. Data is
 * not guaranteed to be on persistent store at the datanodes; block
 * allocations are persisted on the namenode (fsync) when persistBlocks
 * was set. Flushes the current buffer, enqueues the partial packet if new
 * bytes were written since the last flush (otherwise rolls back the
 * sequence number consumed by flushBuffer), optionally restores
 * bytesCurBlock when partial chunk data is kept, then waits for the last
 * queued seqno to be acked. On IOException the stream is marked closed,
 * threads are shut down, and the exception rethrown.
 */
public void sync ( ) throws IOException { long start = System . currentTimeMillis ( ) ; try { long toWaitFor ; synchronized ( this ) { eventStartSync ( ) ; long saveOffset = bytesCurBlock ; DFSOutputStreamPacket oldCurrentPacket = currentPacket ; flushBuffer ( false , shouldKeepPartialChunkData ( ) ) ; eventSyncStartWaitAck ( ) ; if ( DFSClient . LOG . isDebugEnabled ( ) ) { DFSClient . LOG . debug ( "DFSClient flush() : bytesCurBlock " + bytesCurBlock + " lastFlushOffset " + lastFlushOffset ) ; } if ( lastFlushOffset != bytesCurBlock ) { assert bytesCurBlock > lastFlushOffset ; lastFlushOffset = bytesCurBlock ; enqueueCurrentPacket ( ) ; } else { if ( oldCurrentPacket == null && currentPacket != null ) { currentSeqno -- ; } currentPacket = null ; } if ( shouldKeepPartialChunkData ( ) ) { bytesCurBlock = saveOffset ; } toWaitFor = lastQueuedSeqno ; } waitForAckedSeqno ( toWaitFor ) ; eventSyncPktAcked ( ) ; boolean willPersist ; synchronized ( this ) { willPersist = persistBlocks ; persistBlocks = false ; } if ( willPersist ) { dfsClient . namenode . fsync ( src , dfsClient . clientName ) ; } long timeval = System . currentTimeMillis ( ) - start ; dfsClient . metrics . incSyncTime ( timeval ) ; eventEndSync ( ) ; } catch ( IOException e ) { lastException = new IOException ( "IOException flush:" , e ) ; closed = true ; closeThreads ( ) ; throw e ; } }
All data is written out to datanodes . It is not guaranteed that data has been flushed to persistent store on the datanode . Block allocations are persisted on namenode .
33,879
/**
 * Flush all buffered data to the datanodes and block until every queued
 * packet up to the current sequence number has been acknowledged.
 */
private void flushInternal() throws IOException {
  isClosed();
  dfsClient.checkOpen();
  final long seqnoToWaitFor;
  synchronized (this) {
    enqueueCurrentPacket();
    seqnoToWaitFor = lastQueuedSeqno;
  }
  waitForAckedSeqno(seqnoToWaitFor);
}
Waits till all existing data is flushed and confirmations received from datanodes .
33,880
/**
 * Shut down the datastreamer and response-processor threads, waiting for
 * both to terminate.
 *
 * @throws InterruptedIOException if interrupted while joining; the thread's
 *         interrupt status is restored and the cause preserved
 */
private void closeThreads() throws IOException {
  try {
    if (streamer != null) {
      streamer.close();
      streamer.join();
    }
    if (response != null) {
      response.close();
      response.join();
      response = null;
    }
  } catch (InterruptedException e) {
    // Restore the interrupt flag and keep the original cause instead of
    // silently discarding both.
    Thread.currentThread().interrupt();
    InterruptedIOException iioe =
        new InterruptedIOException("Failed to shutdown response thread");
    iioe.initCause(e);
    throw iioe;
  }
}
shutdown datastreamer and responseprocessor threads .
33,881
/**
 * Emit a normal (non-partitioned) record to the output collector.
 *
 * @param key the output key
 * @param value the output value
 * @throws IOException if the collector fails
 */
public void output ( K key , V value ) throws IOException { collector . collect ( key , value ) ; }
The task output a normal record .
33,882
/**
 * Emit a record with an explicit target partition: the partition number is
 * handed to PipesPartitioner before collecting so the next partitioning
 * decision uses it.
 *
 * @param reduce the reduce partition the record should go to
 * @throws IOException if the collector fails
 */
public void partitionedOutput ( int reduce , K key , V value ) throws IOException { PipesPartitioner . setNextPartition ( reduce ) ; collector . collect ( key , value ) ; }
The task output a record with a partition number attached .
33,883
/**
 * Update the amount done and notify the reporter. When a record reader is
 * present, the progress value is also pushed through it by setting
 * progressKey and calling next() — presumably a pipes-specific channel for
 * propagating progress; confirm against the reader implementation.
 *
 * @param progress fraction of work completed
 */
public void progress ( float progress ) throws IOException { progressValue = progress ; reporter . progress ( ) ; if ( recordReader != null ) { progressKey . set ( progress ) ; recordReader . next ( progressKey , nullValue ) ; } }
Update the amount done and call progress on the reporter .
33,884
/**
 * Block until the task finishes or aborts with an exception. Uses
 * wait()/notify() on this object: wakes when either {@code done} is set
 * or {@code exception} is recorded by another thread.
 *
 * @return true when the task completed
 * @throws Throwable the recorded exception, rethrown as-is
 */
public synchronized boolean waitForFinish ( ) throws Throwable { while ( ! done && exception == null ) { wait ( ) ; } if ( exception != null ) { throw exception ; } return done ; }
Wait for the task to finish or abort .
33,885
/**
 * Scan all valid volumes in parallel (one ReportCompiler per volume via
 * the report thread pool), combine the per-volume results, and return the
 * on-disk block lists sorted by blockId, keyed by namespace. Returns an
 * empty map if the dataset volumes are not initialized yet. A failed
 * volume scan is rethrown as a RuntimeException.
 */
private Map < Integer , DiskScanInfo [ ] > getDiskReportPerNamespace ( ) { if ( dataset . volumes == null ) { LOG . warn ( "Dataset volumes are not initialized yet" ) ; return new HashMap < Integer , DiskScanInfo [ ] > ( ) ; } FSVolume [ ] volumes = dataset . volumes . getVolumes ( ) ; ScanInfoListPerNamespace [ ] volumeReports = new ScanInfoListPerNamespace [ volumes . length ] ; Map < Integer , Future < ScanInfoListPerNamespace > > volumeCompilers = new HashMap < Integer , Future < ScanInfoListPerNamespace > > ( ) ; for ( int i = 0 ; i < volumes . length ; i ++ ) { if ( dataset . volumes . isValid ( volumes [ i ] ) ) { ReportCompiler reportCompiler = new ReportCompiler ( volumes [ i ] , datanode ) ; Future < ScanInfoListPerNamespace > result = reportCompileThreadPool . submit ( reportCompiler ) ; volumeCompilers . put ( i , result ) ; } } for ( Entry < Integer , Future < ScanInfoListPerNamespace > > e : volumeCompilers . entrySet ( ) ) { try { int volume = e . getKey ( ) ; volumeReports [ volume ] = e . getValue ( ) . get ( ) ; } catch ( Exception ex ) { LOG . error ( "Error compiling report" , ex ) ; throw new RuntimeException ( ex ) ; } } ScanInfoListPerNamespace list = new ScanInfoListPerNamespace ( ) ; for ( int i = 0 ; i < volumes . length ; i ++ ) { if ( dataset . volumes . isValid ( volumes [ i ] ) ) { list . addAll ( volumeReports [ i ] ) ; } } return list . toSortedArrays ( ) ; }
Get lists of blocks on the disk sorted by blockId per namespace
33,886
/**
 * Recompute disk-vs-memory differences and reconcile each one per
 * namespace via dataset.checkAndUpdate. A failure to reconcile an
 * individual block is logged as a warning and skipped.
 */
void checkDifferenceAndReconcile() {
  resetDiffsAndStats();
  checkDifference();
  for (Entry<Integer, LinkedList<ScanDifference>> nsEntry
      : diffsPerNamespace.entrySet()) {
    Integer nsId = nsEntry.getKey();
    for (ScanDifference diffInfo : nsEntry.getValue()) {
      try {
        dataset.checkAndUpdate(nsId, delta, diffInfo);
      } catch (IOException e) {
        LOG.warn("Cannot reconcile block " + diffInfo.toString(), e);
      }
    }
  }
}
Reconcile differences between disk and in - memory blocks
33,887
/**
 * Build a JAR package for the given resource, showing a progress bar via
 * the workbench progress service.
 *
 * @param resource the Eclipse resource to package
 * @return the created JAR file, or null if packaging failed (exception
 *         printed to stderr) or no file was produced (error dialog shown)
 */
public static File createJarPackage ( IResource resource ) { JarModule jarModule = new JarModule ( resource ) ; try { PlatformUI . getWorkbench ( ) . getProgressService ( ) . run ( false , true , jarModule ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; return null ; } File jarFile = jarModule . getJarFile ( ) ; if ( jarFile == null ) { ErrorMessageDialog . display ( "Run on Hadoop" , "Unable to create or locate the JAR file for the Job" ) ; return null ; } return jarFile ; }
Static way to create a JAR package for the given resource and showing a progress bar
33,888
/**
 * Return the targets whose recorded generation stamp matches that of the
 * last block. Returns null when no generation stamps are tracked or no
 * target matches; returns the full targets array when all match;
 * otherwise returns a newly allocated array of just the matching targets
 * (positions aligned between targets and targetGSs).
 */
DatanodeDescriptor [ ] getValidTargets ( ) { if ( targetGSs == null ) { return null ; } int count = 0 ; long lastBlockGS = this . getLastBlock ( ) . getGenerationStamp ( ) ; for ( long targetGS : targetGSs ) { if ( lastBlockGS == targetGS ) { count ++ ; } } if ( count == 0 ) { return null ; } if ( count == targets . length ) { return targets ; } else { DatanodeDescriptor [ ] validTargets = new DatanodeDescriptor [ count ] ; for ( int i = 0 , numOfValidTargets = 0 ; i < targets . length ; i ++ ) { if ( lastBlockGS == targetGSs [ i ] ) { validTargets [ numOfValidTargets ++ ] = targets [ i ] ; if ( numOfValidTargets == count ) { return validTargets ; } } } return validTargets ; } }
Return the targets with generation stamp matching that of the last block
33,889
/**
 * Set targets for a list of replicas that all share the same generation
 * stamp: every entry of targetGSs gets {@code generationStamp}; a null
 * location list clears the stamps.
 */
void setTargets(DatanodeDescriptor[] locs, long generationStamp) {
  setTargets(locs);
  if (locs == null) {
    targetGSs = null;
    return;
  }
  long[] stamps = new long[locs.length];
  for (int idx = 0; idx < stamps.length; idx++) {
    stamps[idx] = generationStamp;
  }
  this.targetGSs = stamps;
}
Set targets for list of replicas all sharing the same generationStamp
33,890
/**
 * Add this target if not already present; if present with a different
 * generation stamp, update the stamp in place. The parallel arrays
 * targets/targetGSs are grown by one for a new node and primaryNodeIndex
 * is reset. The node is registered with this INode only when newly added.
 *
 * @return true if the target was added or its stamp updated; false if it
 *         was already present with the same stamp
 */
boolean addTarget ( DatanodeDescriptor node , long generationStamp ) { if ( this . targets == null ) { this . targets = new DatanodeDescriptor [ 0 ] ; } for ( int i = 0 ; i < targets . length ; i ++ ) { if ( targets [ i ] . equals ( node ) ) { if ( generationStamp != targetGSs [ i ] ) { targetGSs [ i ] = generationStamp ; return true ; } return false ; } } if ( node != null ) { node . addINode ( this ) ; } DatanodeDescriptor [ ] newt = new DatanodeDescriptor [ targets . length + 1 ] ; long [ ] newgs = new long [ targets . length + 1 ] ; for ( int i = 0 ; i < targets . length ; i ++ ) { newt [ i ] = this . targets [ i ] ; newgs [ i ] = this . targetGSs [ i ] ; } newt [ targets . length ] = node ; newgs [ targets . length ] = generationStamp ; this . targets = newt ; this . targetGSs = newgs ; this . primaryNodeIndex = - 1 ; return true ; }
Add this target if it does not already exist. Returns true if the target was added.
33,891
/**
 * Picks the next live target (round-robin, starting after the previous
 * primary) as the primary datanode and hands it the last block for lease
 * recovery. Logs a warning when there are no targets at all.
 */
void assignPrimaryDatanode() {
  if (targets.length == 0) {
    NameNode.stateChangeLog.warn("BLOCK*"
        + " INodeFileUnderConstruction.initLeaseRecovery:"
        + " No blocks found, lease removed.");
  }
  int previous = primaryNodeIndex;
  Block lastBlock = this.getLastBlock();
  for (int offset = 1; offset <= targets.length; offset++) {
    int candidate = (previous + offset) % targets.length;
    if (!targets[candidate].isAlive) {
      continue;
    }
    primaryNodeIndex = candidate;
    DatanodeDescriptor primary = targets[candidate];
    primary.addBlockToBeRecovered(lastBlock, targets);
    NameNode.stateChangeLog.info("BLOCK* " + lastBlock
        + " recovery started, primary=" + primary);
    return;
  }
}
Initialize lease recovery for this object
33,892
/**
 * Refreshes {@code lastRecoveryTime} to {@code now} if the previous
 * recovery attempt has aged past {@code NameNode.LEASE_RECOVER_PERIOD}.
 *
 * @return true when the period had expired and the timestamp was updated
 */
synchronized boolean setLastRecoveryTime(long now) {
  if (now - lastRecoveryTime <= NameNode.LEASE_RECOVER_PERIOD) {
    return false;
  }
  lastRecoveryTime = now;
  return true;
}
Update lastRecoveryTime if expired .
33,893
/**
 * Clears this open file's replica-target state before delegating block
 * collection to the superclass; called when an open file is deleted.
 */
int collectSubtreeBlocksAndClear(List<BlockInfo> v, int blocksLimit,
    List<INode> removedINodes) {
  // Reset target bookkeeping first, then collect blocks as usual.
  clearTargets();
  int collected =
      super.collectSubtreeBlocksAndClear(v, blocksLimit, removedINodes);
  return collected;
}
When deleting an open file, clear its replica targets before collecting its blocks.
33,894
/**
 * Detaches this INodeFileUnderConstruction from every datanode in the
 * given target list. A null list is a no-op.
 */
private void removeINodeFromDatanodeDescriptors(DatanodeDescriptor[] targets) {
  if (targets == null) {
    return;
  }
  for (int i = 0; i < targets.length; i++) {
    targets[i].removeINode(this);
  }
}
Remove this INodeFileUnderConstruction from the INode lists of the given datanodes.
33,895
/**
 * Attaches this INodeFileUnderConstruction to every datanode in the given
 * target list. A null list is a no-op.
 */
private void addINodeToDatanodeDescriptors(DatanodeDescriptor[] targets) {
  if (targets == null) {
    return;
  }
  for (int i = 0; i < targets.length; i++) {
    targets[i].addINode(this);
  }
}
Add this INodeFileUnderConstruction to the INode lists of the given datanodes.
33,896
/**
 * Emits everything from the collector: every tuple combination is kept.
 * Only sanity-checks (under -ea) that source and destination sizes agree.
 */
protected boolean combine(Object[] srcs, TupleWritable dst) {
  final boolean sizesAgree = srcs.length == dst.size();
  assert sizesAgree;
  return true;
}
Emit everything from the collector .
33,897
/**
 * Returns the RPC client for the task tracker at {@code host:port},
 * creating and caching one on first use. Creation is serialized per
 * topology node so concurrent callers share a single client.
 *
 * @throws IOException if a new client cannot be created
 */
public CoronaTaskTrackerProtocol getClient(String host, int port)
    throws IOException {
  String key = makeKey(host, port);
  Node ttNode = topologyCache.getNode(host);
  synchronized (ttNode) {
    CoronaTaskTrackerProtocol cached = trackerClients.get(key);
    if (cached != null) {
      return cached;
    }
    CoronaTaskTrackerProtocol fresh = createClient(host, port);
    trackerClients.put(key, fresh);
    return fresh;
  }
}
API to get the RPC client .
33,898
/**
 * Opens an RPC connection to the task tracker at {@code host:port}.
 * Resolution order: a static resolution for the host if one is configured;
 * otherwise the host string interpreted as raw address bytes when possible;
 * otherwise an ordinary DNS lookup.
 *
 * @throws IOException if resolution or the RPC proxy setup fails
 */
private CoronaTaskTrackerProtocol createClient(String host, int port)
    throws IOException {
  String staticHost = NetUtils.getStaticResolution(host);
  InetAddress resolved;
  if (staticHost != null) {
    resolved = InetAddress.getByName(staticHost);
  } else {
    byte[] rawAddr = Utilities.asBytes(host);
    resolved = (rawAddr == null)
        ? InetAddress.getByName(host)
        : InetAddress.getByAddress(rawAddr);
  }
  InetSocketAddress target = new InetSocketAddress(resolved, port);
  LOG.info("Creating client to "
      + (staticHost != null ? staticHost : host) + ":" + target.getPort());
  long connectTimeout =
      conf.getLong(CoronaJobTracker.TT_CONNECT_TIMEOUT_MSEC_KEY, 10000L);
  int rpcTimeout =
      conf.getInt(CoronaJobTracker.TT_RPC_TIMEOUT_MSEC_KEY, 60000);
  return RPC.waitForProxy(CoronaTaskTrackerProtocol.class,
      CoronaTaskTrackerProtocol.versionID, target, conf,
      connectTimeout, rpcTimeout);
}
Connect to the task tracker and get the RPC client .
33,899
/**
 * Closes the shard writer: optionally optimizes the Lucene index down to
 * at most {@code maxNumSegments} segments, closes the index writer, moves
 * the files written in the temp directory to the permanent directory, and
 * finally closes the directory handle itself.
 *
 * @throws IOException if optimizing, closing, or moving the files fails
 */
public void close() throws IOException {
  LOG.info("Closing the shard writer, processed " + numForms + " forms");
  try {
    try {
      if (maxNumSegments > 0) {
        // Only optimize when a segment cap was configured.
        writer.optimize(maxNumSegments);
        LOG.info("Optimized the shard into at most " + maxNumSegments
            + " segments");
      }
    } finally {
      // Close the writer even if the optimize step threw.
      writer.close();
      LOG.info("Closed Lucene index writer");
    }
    // Only runs when optimize/close above succeeded.
    moveFromTempToPerm();
    LOG.info("Moved new index files to " + perm);
  } finally {
    // Always release the directory, whatever happened above.
    dir.close();
    LOG.info("Closed the shard writer");
  }
}
Close the shard writer . Optimize the Lucene instance of the shard before closing if necessary and copy the files created in the temp directory to the permanent directory after closing .