idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,800
private void initialize ( Configuration conf ) throws IOException { JvmMetrics . init ( "SecondaryNameNode" , conf . get ( "session.id" ) ) ; shouldRun = true ; nameNodeAddr = NameNode . getClientProtocolAddress ( conf ) ; this . conf = conf ; this . namenode = ( NamenodeProtocol ) RPC . waitForProxy ( NamenodeProtocol...
Initialize SecondaryNameNode .
33,801
public void shutdown ( ) { shouldRun = false ; try { if ( infoServer != null ) infoServer . stop ( ) ; } catch ( Exception e ) { LOG . warn ( "Exception shutting down SecondaryNameNode" , e ) ; } try { if ( checkpointImage != null ) checkpointImage . close ( ) ; } catch ( IOException e ) { LOG . warn ( StringUtils . st...
Shut down this instance of the SecondaryNameNode . Returns only after shutdown is complete .
33,802
private String getInfoServer ( ) throws IOException { URI fsName = FileSystem . getDefaultUri ( conf ) ; if ( ! "hdfs" . equals ( fsName . getScheme ( ) ) ) { throw new IOException ( "This is not a DFS" ) ; } return NetUtils . getServerAddress ( conf , "dfs.info.bindAddress" , "dfs.info.port" , "dfs.http.address" ) ; }
Returns the Jetty server that the Namenode is listening on .
33,803
boolean doCheckpoint ( ) throws IOException { LOG . info ( "Checkpoint starting" ) ; startCheckpoint ( ) ; checkpointImage . ensureCurrentDirExists ( ) ; NNStorage dstStorage = checkpointImage . storage ; CheckpointSignature sig = namenode . rollEditLog ( ) ; if ( checkpointImage . getNamespaceID ( ) != 0 ) { sig . val...
Create a new checkpoint
33,804
private void doMerge ( CheckpointSignature sig , RemoteEditLogManifest manifest , boolean loadImage , FSImage dstImage ) throws IOException { if ( loadImage ) { namesystem = new FSNamesystem ( checkpointImage , conf ) ; checkpointImage . setFSNamesystem ( namesystem ) ; } assert namesystem . dir . fsImage == checkpoint...
Merge downloaded image and edits and write the new image into current storage directory .
33,805
private void writeControlFile ( FileSystem fs , Path outputPath , Path checksumFile , String name ) throws IOException { SequenceFile . Writer write = null ; try { Path parentDir = new Path ( rtc . input , "filelists" ) ; if ( ! fs . exists ( parentDir ) ) { fs . mkdirs ( parentDir ) ; } Path controlFile = new Path ( p...
This is used for verification Each mapper writes one control file control file only contains the base directory written by this mapper and the checksum file path so that we could create a Read mapper which scanned the files under the base directory and verify the checksum of files with the information given in the chec...
33,806
public GenThread [ ] prepare ( JobConf conf , Text key , Text value ) throws IOException { this . rtc = new GenWriterRunTimeConstants ( ) ; super . prepare ( conf , key , value , rtc ) ; rtc . task_name = key . toString ( ) + rtc . taskID ; rtc . roll_interval = conf . getLong ( WRITER_ROLL_INTERVAL_KEY , DEFAULT_ROLL_...
Create a number of threads to generate write traffics
33,807
private void setCatchingUp ( ) throws IOException { try { if ( inputEditStream != null && inputEditStream . isInProgress ( ) ) { catchingUp = ( inputEditStream . length ( ) - inputEditStream . getPosition ( ) > catchUpLag ) ; } else { catchingUp = true ; } } catch ( Exception e ) { catchingUp = true ; } }
Checks if the ingest is catching up . If the ingest is consuming a finalized segment , it is assumed to be behind . Otherwise , catching up is based on the position of the input stream .
33,808
public long getLagBytes ( ) { try { if ( inputEditStream != null && inputEditStream . isInProgress ( ) ) { return Math . max ( - 1 , inputEditStream . length ( ) - this . inputEditStream . getPosition ( ) ) ; } return - 1 ; } catch ( IOException ex ) { LOG . error ( "Error getting the lag" , ex ) ; return - 1 ; } }
Returns the distance in bytes between the current position inside of the edits log and the length of the edits log
33,809
private int loadFSEdits ( ) throws IOException { FSDirectory fsDir = fsNamesys . dir ; int numEdits = 0 ; long startTime = FSNamesystem . now ( ) ; LOG . info ( "Ingest: Consuming transactions: " + this . toString ( ) ) ; try { logVersion = inputEditStream . getVersion ( ) ; if ( ! LayoutVersion . supports ( Feature . ...
Load an edit log and continue applying the changes to the in - memory structure . This is where we ingest transactions into the standby .
33,810
private FSEditLogOp ingestFSEdit ( EditLogInputStream inputEditLog ) throws IOException { FSEditLogOp op = null ; try { op = inputEditLog . readOp ( ) ; InjectionHandler . processEventIO ( InjectionEvent . INGEST_READ_OP ) ; } catch ( EOFException e ) { return null ; } catch ( IOException e ) { throw e ; } catch ( Exce...
Read a single transaction from the input edit log
33,811
private boolean shouldLoad ( long txid ) { boolean shouldLoad = txid > standby . getLastCorrectTxId ( ) ; if ( ! shouldLoad ) { LOG . info ( "Ingest: skip loading txId: " + txid + " to namesystem, but writing to edit log, last correct txid: " + standby . getLastCorrectTxId ( ) ) ; } return shouldLoad ; }
Used for ingest recovery where we erase the local edit log and transactions need to be populated to the local log but they should be not loaded into the namespace .
33,812
Path mapCachePath ( Path hdfsPath ) { assert hdfsPath . isAbsolute ( ) ; Path value = new Path ( cacheDir + Path . SEPARATOR + hdfsPath ) ; return value ; }
Maps a hdfs path into a pathname in the local cache . In the current implementation the cachePath is the same as the hdfs pathname .
33,813
public void evictCache ( Path hdfsPath , Path localPath , long size ) throws IOException { boolean done = cacheFs . delete ( localPath , false ) ; if ( ! done ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Evict for path: " + hdfsPath + " local path " + localPath + " unsuccessful." ) ; } } }
Evicts a file from the cache . If the cache is exceeding capacity then the cache calls this method to indicate that it is evicting a file from the cache . This is part of the Eviction Interface .
33,814
public FSDataOutputStream create ( Path f , FsPermission permission , boolean overwrite , int bufferSize , short replication , long blockSize , Progressable progress ) throws IOException { FSDataOutputStream fd = new FSDataOutputStream ( new CacheOutputStream ( conf , this , f , permission , overwrite , bufferSize , re...
Create a new file . We start writing the data into the underlying filesystem as well as the cacheFileSystem .
33,815
public int runAll ( ) { int exitCode = 0 ; if ( args == null ) { return run ( ) ; } for ( String src : args ) { try { Path srcPath = new Path ( src ) ; FileSystem fs = srcPath . getFileSystem ( getConf ( ) ) ; FileStatus [ ] statuses = fs . globStatus ( srcPath ) ; if ( statuses == null ) { System . err . println ( "Ca...
For each source path execute the command
33,816
private void printToStdout ( InputStream in ) throws IOException { try { IOUtils . copyBytes ( in , System . out , getConf ( ) , false ) ; } finally { in . close ( ) ; } }
Print from src to stdout .
33,817
private void copyToLocal ( final FileSystem srcFS , final Path src , final File dst , final boolean copyCrc ) throws IOException { if ( ! srcFS . getFileStatus ( src ) . isDir ( ) ) { if ( dst . exists ( ) ) { throw new IOException ( "Target " + dst + " already exists" ) ; } File tmp = FileUtil . createLocalTempFile ( ...
Copy a source file from a given file system to local destination .
33,818
private void tail ( String [ ] cmd , int pos ) throws IOException { CommandFormat c = new CommandFormat ( "tail" , 1 , 1 , "f" ) ; String src = null ; Path path = null ; try { List < String > parameters = c . parse ( cmd , pos ) ; src = parameters . get ( 0 ) ; } catch ( IllegalArgumentException iae ) { System . err . ...
Parse the incoming command string
33,819
public static boolean supports ( final Feature f , final int lv ) { final EnumSet < Feature > set = map . get ( lv ) ; return set != null && set . contains ( f ) ; }
Returns true if a given feature is supported in the given layout version
33,820
private int doWork ( String [ ] args ) { if ( args . length == 1 ) { CommandHandler handler = Command . getHandler ( args [ 0 ] ) ; if ( handler != null ) { return handler . doWork ( this ) ; } } printUsage ( ) ; return - 1 ; }
Main method that runs the tool for given arguments .
33,821
public Vector < TaskInProgress > reportTasksInProgress ( boolean shouldBeMap , boolean shouldBeComplete ) { Vector < TaskInProgress > results = new Vector < TaskInProgress > ( ) ; TaskInProgress tips [ ] = null ; if ( shouldBeMap ) { tips = maps ; } else { tips = reduces ; } for ( int i = 0 ; i < tips . length ; i ++ )...
Return a vector of completed TaskInProgress objects
33,822
public Vector < TaskInProgress > reportCleanupTIPs ( boolean shouldBeComplete ) { Vector < TaskInProgress > results = new Vector < TaskInProgress > ( ) ; for ( int i = 0 ; i < cleanup . length ; i ++ ) { if ( cleanup [ i ] . isComplete ( ) == shouldBeComplete ) { results . add ( cleanup [ i ] ) ; } } return results ; }
Return a vector of cleanup TaskInProgress objects
33,823
public Vector < TaskInProgress > reportSetupTIPs ( boolean shouldBeComplete ) { Vector < TaskInProgress > results = new Vector < TaskInProgress > ( ) ; for ( int i = 0 ; i < setup . length ; i ++ ) { if ( setup [ i ] . isComplete ( ) == shouldBeComplete ) { results . add ( setup [ i ] ) ; } } return results ; }
Return a vector of setup TaskInProgress objects
33,824
public TaskInProgress getTaskInProgress ( TaskID tipid ) { if ( tipid . isMap ( ) ) { if ( cleanup . length > 0 && tipid . equals ( cleanup [ 0 ] . getTIPId ( ) ) ) { return cleanup [ 0 ] ; } if ( setup . length > 0 && tipid . equals ( setup [ 0 ] . getTIPId ( ) ) ) { return setup [ 0 ] ; } for ( int i = 0 ; i < maps ....
Return the TaskInProgress that matches the tipid .
33,825
public void close ( ) throws IOException , InterruptedException { LOG . debug ( "closing connection" ) ; stream . close ( ) ; uplink . closeConnection ( ) ; uplink . interrupt ( ) ; uplink . join ( ) ; }
Close the connection and shutdown the handler thread .
33,826
private void writeObject ( Writable obj ) throws IOException { if ( obj instanceof Text ) { Text t = ( Text ) obj ; int len = t . getLength ( ) ; WritableUtils . writeVInt ( stream , len ) ; stream . write ( t . getBytes ( ) , 0 , len ) ; } else if ( obj instanceof BytesWritable ) { BytesWritable b = ( BytesWritable ) ...
Write the given object to the stream . If it is a Text or BytesWritable write it directly . Otherwise write it to a buffer and then write the length and data to the stream .
33,827
public void stop ( ) { try { if ( server != null ) { server . stop ( ) ; server . join ( ) ; } } catch ( Exception e ) { LOG . warn ( "Got exception shutting down proxy" , e ) ; } }
Stop all server threads and wait for all to finish .
33,828
public Path getParent ( ) { String path = uri . getPath ( ) ; int lastSlash = path . lastIndexOf ( '/' ) ; int start = hasWindowsDrive ( path , true ) ? 3 : 0 ; if ( ( path . length ( ) == start ) || ( lastSlash == start && path . length ( ) == start + 1 ) ) { return null ; } String parent ; if ( lastSlash == - 1 ) { p...
Returns the parent of a path or null if at root .
33,829
public Path makeQualified ( FileSystem fs ) { Path path = this ; if ( ! isAbsolute ( ) ) { FileSystem . LogForCollect . info ( "make Qualify non absolute path: " + this . toString ( ) + " working directory: " + fs . getWorkingDirectory ( ) ) ; path = new Path ( fs . getWorkingDirectory ( ) , this ) ; } URI pathUri = pa...
Returns a qualified path object .
33,830
public void configure ( JobConf job ) { this . inputFile = job . get ( "map.input.file" ) ; maxNumItems = job . getLong ( "aggregate.max.num.unique.values" , Long . MAX_VALUE ) ; }
get the input file name .
33,831
public static HardLinkFileInfo loadHardLinkFileInfo ( long hardLinkID , FSImageLoadingContext context ) { context . getFSDirectory ( ) . resetLastHardLinkIDIfLarge ( hardLinkID ) ; HardLinkFileInfo fileInfo = context . getHardLinkFileInfo ( hardLinkID ) ; if ( fileInfo == null ) { fileInfo = new HardLinkFileInfo ( hard...
Create a HardLinkFileInfo if necessary , register it in the hardLinkINodeIDToFileInfoMap , and return the HardLinkFileInfo that is registered in the hardLinkINodeIDToFileInfoMap .
33,832
public FileStatus getNextFile ( ) throws IOException { while ( ! doneTraversal ( ) ) { while ( ! stack . isEmpty ( ) ) { Node node = stack . peek ( ) ; if ( node . hasNext ( ) ) { FileStatus element = node . next ( ) ; if ( ! element . isDir ( ) ) { return element ; } try { pushNewNode ( element ) ; } catch ( FileNotFo...
Return the next file .
33,833
@ SuppressWarnings ( "unchecked" ) public int getPartition ( K key , V value , int numPartitions ) { return partitions . findPartition ( key ) ; }
By construction , we know our key type .
33,834
@ SuppressWarnings ( "unchecked" ) private K [ ] readPartitions ( FileSystem fs , Path p , Class < K > keyClass , JobConf job ) throws IOException { SequenceFile . Reader reader = new SequenceFile . Reader ( fs , p , job ) ; ArrayList < K > parts = new ArrayList < K > ( ) ; K key = ( K ) ReflectionUtils . newInstance (...
matching key types enforced by passing in
33,835
private TrieNode buildTrie ( BinaryComparable [ ] splits , int lower , int upper , byte [ ] prefix , int maxDepth ) { final int depth = prefix . length ; if ( depth >= maxDepth || lower == upper ) { return new LeafTrieNode ( depth , splits , lower , upper ) ; } InnerTrieNode result = new InnerTrieNode ( depth ) ; byte ...
Given a sorted set of cut points build a trie that will find the correct partition quickly .
33,836
private void mkdir ( IStructuredSelection selection ) { List < DFSFolder > folders = filterSelection ( DFSFolder . class , selection ) ; if ( folders . size ( ) >= 1 ) { DFSFolder folder = folders . get ( 0 ) ; InputDialog dialog = new InputDialog ( Display . getCurrent ( ) . getActiveShell ( ) , "Create subfolder" , "...
Create a new sub - folder into an existing directory
33,837
private void open ( IStructuredSelection selection ) throws IOException , PartInitException , InvocationTargetException , InterruptedException { for ( DFSFile file : filterSelection ( DFSFile . class , selection ) ) { IStorageEditorInput editorInput = new DFSFileEditorInput ( file ) ; targetPart . getSite ( ) . getWork...
Open the selected DfsPath in the editor window
33,838
synchronized float getAverageWaitMsecsPerHardAdmissionJob ( ) { float averageWaitMsecsPerHardAdmissionJob = - 1f ; if ( ! hardAdmissionMillisQueue . isEmpty ( ) ) { long totalWait = 0 ; for ( Long waitMillis : hardAdmissionMillisQueue ) { totalWait += waitMillis ; } averageWaitMsecsPerHardAdmissionJob = ( ( float ) tot...
Get the average waiting msecs per hard admission job entrance .
33,839
synchronized JobAdmissionWaitInfo getJobAdmissionWaitInfo ( JobInProgress job ) { Integer rank = jobToRank . get ( job ) ; int position = ( rank == null ) ? - 1 : rank ; float averageWaitMsecsPerHardAdmissionJob = getAverageWaitMsecsPerHardAdmissionJob ( ) ; return new JobAdmissionWaitInfo ( exceedTaskLimit ( ) , posit...
Get the job admission wait info for a particular job .
33,840
protected void detectJournalManager ( ) throws IOException { int failures = 0 ; do { try { Stat stat = new Stat ( ) ; String primaryAddr = zk . getPrimaryAvatarAddress ( logicalName , stat , true , true ) ; if ( primaryAddr == null || primaryAddr . trim ( ) . isEmpty ( ) ) { primaryURI = null ; remoteJournalManager = n...
Detect the primary node and the current Journal Manager .
33,841
private void expandIfNecessary ( int size ) { if ( bytes . length >= size ) { return ; } int newlength = Math . max ( 2 * bytes . length , size ) ; bytes = Arrays . copyOf ( bytes , newlength ) ; }
Expand the underlying byte array to fit size bytes .
33,842
public void write ( byte b [ ] , int off , int len ) { expandIfNecessary ( count + len ) ; System . arraycopy ( b , off , bytes , count , len ) ; count += len ; }
Writes array of bytes into the output stream .
33,843
private void newLine ( ) { numBlocks = 0 ; perms = username = group = path = linkTarget = replication = hardlinkId = "" ; filesize = 0l ; type = INode . INodeType . REGULAR_INODE . toString ( ) ; inInode = true ; }
Start a new line of output reset values .
33,844
private Path [ ] getMapFiles ( FileSystem fs , boolean isLocal ) throws IOException { List < Path > fileList = new ArrayList < Path > ( ) ; if ( isLocal ) { for ( int i = 0 ; i < numMaps ; ++ i ) { fileList . add ( mapOutputFile . getInputFile ( i , getTaskID ( ) ) ) ; } } else { for ( FileStatus filestatus : mapOutput...
Get the input files for the reducer .
33,845
private static int getClosestPowerOf2 ( int value ) { if ( value <= 0 ) throw new IllegalArgumentException ( "Undefined for " + value ) ; final int hob = Integer . highestOneBit ( value ) ; return Integer . numberOfTrailingZeros ( hob ) + ( ( ( hob >>> 1 ) & value ) == 0 ? 0 : 1 ) ; }
Return the exponent of the power of two closest to the given positive value ; throws IllegalArgumentException if value <= 0 . This follows the observation that the msb of a given value is also the closest power of two unless the bit following it is set .
33,846
public void init ( ) throws IOException { try { if ( zooKeeper . exists ( zooKeeperParentPath , false ) == null ) { zooKeeper . create ( zooKeeperParentPath , new byte [ ] { '0' } , ZooDefs . Ids . OPEN_ACL_UNSAFE , CreateMode . PERSISTENT ) ; LOG . info ( "Created ZNode " + zooKeeperParentPath ) ; } if ( zooKeeper . e...
Create znodes for storing ledger metadata if they have not been created before
33,847
public String fullyQualifiedPathForLedger ( EditLogLedgerMetadata e ) { String nameForLedger = nameForLedger ( e ) ; return fullyQualifiedPathForLedger ( nameForLedger ) ; }
Return the full ZNode path for a ZNode corresponding to a specific ledger's metadata .
33,848
public boolean deleteLedgerMetadata ( EditLogLedgerMetadata ledger , int version ) throws IOException { String ledgerPath = fullyQualifiedPathForLedger ( ledger ) ; try { zooKeeper . delete ( ledgerPath , version ) ; return true ; } catch ( KeeperException . NoNodeException e ) { LOG . warn ( ledgerPath + " does not ex...
Removes ledger - related Metadata from BookKeeper . Does not delete the ledger itself .
33,849
public boolean verifyEditLogLedgerMetadata ( EditLogLedgerMetadata metadata , String fullPathToVerify ) { Preconditions . checkNotNull ( metadata ) ; try { EditLogLedgerMetadata otherMetadata = readEditLogLedgerMetadata ( fullPathToVerify ) ; if ( otherMetadata == null ) { LOG . warn ( "No metadata found " + fullPathTo...
Verify that the specified EditLogLedgerMetadata instance is the same as the EditLogLedgerMetadata object stored in the specified ZNode path .
33,850
public Collection < EditLogLedgerMetadata > listLedgers ( boolean includeInProgressLedgers ) throws IOException { TreeSet < EditLogLedgerMetadata > ledgers = new TreeSet < EditLogLedgerMetadata > ( ) ; try { List < String > ledgerNames = zooKeeper . getChildren ( ledgerParentPath , false ) ; for ( String ledgerName : l...
List all ledgers in this instance s ZooKeeper namespace .
33,851
boolean doMerge ( String [ ] srcDataDirs , Collection < File > dstDataDirs , int namespaceId , NamespaceInfo nsInfo , StartupOption startOpt ) throws IOException { HashMap < File , File > dirsToMerge = new HashMap < File , File > ( ) ; int i = 0 ; for ( Iterator < File > it = dstDataDirs . iterator ( ) ; it . hasNext (...
merge the data directory from srcDataDirs to dstDataDirs
33,852
void recoverTransitionRead ( DataNode datanode , int namespaceId , NamespaceInfo nsInfo , Collection < File > dataDirs , StartupOption startOpt , String nameserviceId ) throws IOException { Collection < File > nsDataDirs = new ArrayList < File > ( ) ; for ( Iterator < File > it = dataDirs . iterator ( ) ; it . hasNext ...
recoverTransitionRead for a specific Name Space
33,853
public static void makeNameSpaceDataDir ( Collection < File > dataDirs ) throws IOException { for ( File data : dataDirs ) { try { DiskChecker . checkDir ( data ) ; } catch ( IOException e ) { LOG . warn ( "Invalid directory in: " + data . getCanonicalPath ( ) + ": " + e . getMessage ( ) ) ; } } }
Create physical directory for Name Spaces on the data node
33,854
private void doTransition ( List < StorageDirectory > sds , NamespaceInfo nsInfo , StartupOption startOpt ) throws IOException { if ( startOpt == StartupOption . ROLLBACK ) doRollback ( nsInfo ) ; int numOfDirs = sds . size ( ) ; List < StorageDirectory > dirsToUpgrade = new ArrayList < StorageDirectory > ( numOfDirs )...
Analyze which and whether a transition of the fs state is required and perform it if necessary .
33,855
private void addNameSpaceStorage ( int nsID , NameSpaceSliceStorage nsStorage ) throws IOException { if ( ! this . nsStorageMap . containsKey ( nsID ) ) { this . nsStorageMap . put ( nsID , nsStorage ) ; } }
Add nsStorage into nsStorageMap
33,856
boolean contains ( DatanodeDescriptor node ) { if ( node == null ) { return false ; } String host = node . getHost ( ) ; hostmapLock . readLock ( ) . lock ( ) ; try { DatanodeDescriptor [ ] nodes = map . get ( host ) ; if ( nodes != null ) { for ( DatanodeDescriptor containedNode : nodes ) { if ( node == containedNode ...
Check if node is already in the map .
33,857
boolean add ( DatanodeDescriptor node ) { hostmapLock . writeLock ( ) . lock ( ) ; try { if ( node == null || contains ( node ) ) { return false ; } String host = node . getHost ( ) ; DatanodeDescriptor [ ] nodes = map . get ( host ) ; DatanodeDescriptor [ ] newNodes ; if ( nodes == null ) { newNodes = new DatanodeDesc...
add node to the map return true if the node is added ; false otherwise .
33,858
boolean remove ( DatanodeDescriptor node ) { if ( node == null ) { return false ; } String host = node . getHost ( ) ; hostmapLock . writeLock ( ) . lock ( ) ; try { DatanodeDescriptor [ ] nodes = map . get ( host ) ; if ( nodes == null ) { return false ; } if ( nodes . length == 1 ) { if ( nodes [ 0 ] == node ) { map ...
remove node from the map return true if the node is removed ; false otherwise .
33,859
public DatanodeDescriptor getDatanodeByName ( String name ) { if ( name == null ) { return null ; } int colon = name . indexOf ( ":" ) ; String host ; if ( colon < 0 ) { host = name ; } else { host = name . substring ( 0 , colon ) ; } hostmapLock . readLock ( ) . lock ( ) ; try { DatanodeDescriptor [ ] nodes = map . ge...
Find data node by its name .
33,860
public void reduce ( Text arg0 , Iterator < Text > arg1 , OutputCollector < Text , Text > arg2 , Reporter arg3 ) throws IOException { throw new IOException ( "should not be called\n" ) ; }
Do nothing . Should not be called .
33,861
public synchronized Map < TaskError , Integer > getRecentErrorCounts ( long timeWindow ) { long start = System . currentTimeMillis ( ) - timeWindow ; Map < TaskError , Integer > errorCounts = createErrorCountsMap ( ) ; Iterator < Map < TaskError , Integer > > errorCountsIter = errorCountsQueue . iterator ( ) ; Iterator...
Get recent TaskError counts within the given window
33,862
private Map < String , TaskError > parseConfigFile ( URL configURL ) { Map < String , TaskError > knownErrors = new LinkedHashMap < String , TaskError > ( ) ; try { Element root = getRootElement ( configURL ) ; NodeList elements = root . getChildNodes ( ) ; for ( int i = 0 ; i < elements . getLength ( ) ; ++ i ) { Node...
Parse the error . xml file which contains the error
33,863
public void processBadResource ( int grant , boolean abandonHost ) { synchronized ( lockObject ) { Set < String > excludedHosts = null ; TaskInProgress tip = requestToTipMap . get ( grant ) ; if ( ! job . canLaunchJobCleanupTask ( ) && ( ! tip . isRunnable ( ) || ( tip . isRunning ( ) && ! ( speculatedMaps . contains (...
Return this grant and request a different one . This can happen because the task has failed was killed or the job tracker decided that the resource is bad
33,864
private void updateTaskStatuses ( TaskTrackerStatus status ) { TaskTrackerInfo trackerInfo = TaskTrackerInfo . fromStatus ( status ) ; String trackerName = status . getTrackerName ( ) ; for ( TaskStatus report : status . getTaskReports ( ) ) { report . setTaskTracker ( trackerName ) ; LOG . debug ( "Task status report:...
Updates job and tasks state according to report from TaskTracker
33,865
private void updateTaskStatus ( TaskTrackerInfo info , TaskStatus report ) { TaskAttemptID taskId = report . getTaskID ( ) ; if ( ! this . jobId . equals ( taskId . getJobID ( ) ) ) { LOG . warn ( "Task " + taskId + " belongs to unknown job " + taskId . getJobID ( ) ) ; return ; } TaskInProgress tip = taskLookupTable ....
Updates job and tasks state according to TaskStatus from given TaskTracker . This function only updates internal job state it shall NOT issue any actions directly .
33,866
private void saveNewRequestForTip ( TaskInProgress tip , ResourceRequest req ) { requestToTipMap . put ( req . getId ( ) , tip ) ; TaskContext context = taskToContextMap . get ( tip ) ; if ( context == null ) { context = new TaskContext ( req ) ; } else { context . resourceRequests . add ( req ) ; } taskToContextMap . ...
Saves new request for given tip no recording in resource tracker happens
33,867
public JobID getNewJobId ( ) throws IOException { int value = jobCounter . incrementAndGet ( ) ; if ( value > 1 ) { throw new RuntimeException ( "CoronaJobTracker can only run one job! (value=" + value + ")" ) ; } createSession ( ) ; jobId = jobIdFromSessionId ( sessionId ) ; return jobId ; }
Returns a unique JobID for a new job . CoronaJobTracker can only run a single job and its ID is fixed a priori .
33,868
private void dispatchCommitActions ( List < CommitTaskAction > commitActions ) throws IOException { if ( ! commitActions . isEmpty ( ) ) { TaskAttemptID [ ] wasCommitting ; try { wasCommitting = commitPermissionClient . getAndSetCommitting ( commitActions ) ; } catch ( IOException e ) { LOG . error ( "Commit permission...
Executes actions that can be executed after asking commit permission authority
33,869
private boolean isMatchingJobId ( JobID jobId ) { if ( isStandalone ) { return this . jobId . equals ( jobId ) ; } else { return this . jobId . equals ( getMainJobID ( jobId ) ) ; } }
Check if given id matches job id of this JT .
33,870
public void expiredLaunchingTask ( TaskAttemptID taskId ) { synchronized ( lockObject ) { String trackerName = taskLookupTable . getAssignedTracker ( taskId ) ; trackerStats . recordTimeout ( trackerName ) ; localJTSubmitter . submit ( new TaskTimeout ( trackerName ) ) ; failTask ( taskId , "Error launching task" , fal...
Handle a task that could not be launched .
33,871
public void prepareFailover ( ) { if ( ! RemoteJTProxy . isJTRestartingEnabled ( conf ) ) { return ; } LOG . info ( "prepareFailover done" ) ; this . isPurgingJob = false ; if ( this . parentHeartbeat != null ) { this . interTrackerServer . stop ( ) ; } }
Some preparation work needed by the remote job tracker for failover .
33,872
public int go ( ) throws IOException { try { return run ( argv_ ) ; } catch ( Exception ex ) { throw new IOException ( ex . getMessage ( ) ) ; } }
This is the method that actually initializes the job conf and submits the job to the jobtracker
33,873
protected void listJobConfProperties ( ) { msg ( "==== JobConf properties:" ) ; Iterator it = jobConf_ . iterator ( ) ; TreeMap sorted = new TreeMap ( ) ; while ( it . hasNext ( ) ) { Map . Entry en = ( Map . Entry ) it . next ( ) ; sorted . put ( en . getKey ( ) , en . getValue ( ) ) ; } it = sorted . entrySet ( ) . i...
Prints out the jobconf properties on stdout when verbose is specified .
33,874
public int submitAndMonitorJob ( ) throws IOException { if ( jar_ != null && isLocalHadoop ( ) ) { File wd = new File ( "." ) . getAbsoluteFile ( ) ; StreamUtil . unJar ( new File ( jar_ ) , wd ) ; } jc_ = new JobClient ( jobConf_ ) ; boolean error = true ; running_ = null ; String lastReport = null ; try { running_ = ...
Based on JobClient
33,875
private void checkIfLastPacketTimeout ( ) { synchronized ( ackQueue ) { if ( ! ackQueue . isEmpty ( ) && ( System . currentTimeMillis ( ) - lastPacketSentTime > packetTimeout ) ) { DFSClient . LOG . warn ( "Packet " + ackQueue . getLast ( ) . seqno + " of " + block + " is timed out" ) ; } } }
Check if the last outstanding packet has not received an ack before it is timed out . If so , for now , just log it . We will provide a decent solution to this later on .
33,876
private boolean setupPipelineForAppend ( LocatedBlock lastBlock ) throws IOException { if ( nodes == null || nodes . length == 0 ) { String msg = "Could not get block locations. " + "Source file \"" + src + "\" - Aborting..." ; DFSClient . LOG . warn ( msg ) ; setLastException ( new IOException ( msg ) ) ; closed = tru...
Setup the Append pipeline the length of current pipeline will shrink if any datanodes are dead during the process .
33,877
private DatanodeInfo [ ] nextBlockOutputStream ( String client ) throws IOException { LocatedBlock lb = null ; boolean retry = false ; DatanodeInfo [ ] nodes ; ArrayList < DatanodeInfo > excludedNodes = new ArrayList < DatanodeInfo > ( ) ; int count = dfsClient . conf . getInt ( "dfs.client.block.write.retries" , 3 ) ;...
Open a DataOutputStream to a DataNode so that it can be written to . This happens when a file is created and each time a new block is allocated . Must get block ID and the IDs of the destinations from the namenode . Returns the list of target datanodes .
33,878
public void sync ( ) throws IOException { long start = System . currentTimeMillis ( ) ; try { long toWaitFor ; synchronized ( this ) { eventStartSync ( ) ; long saveOffset = bytesCurBlock ; DFSOutputStreamPacket oldCurrentPacket = currentPacket ; flushBuffer ( false , shouldKeepPartialChunkData ( ) ) ; eventSyncStartWa...
All data is written out to datanodes . It is not guaranteed that data has been flushed to persistent store on the datanode . Block allocations are persisted on namenode .
33,879
private void flushInternal ( ) throws IOException { isClosed ( ) ; dfsClient . checkOpen ( ) ; long toWaitFor ; synchronized ( this ) { enqueueCurrentPacket ( ) ; toWaitFor = lastQueuedSeqno ; } waitForAckedSeqno ( toWaitFor ) ; }
Waits till all existing data is flushed and confirmations received from datanodes .
33,880
private void closeThreads ( ) throws IOException { try { if ( streamer != null ) { streamer . close ( ) ; streamer . join ( ) ; } if ( response != null ) { response . close ( ) ; response . join ( ) ; response = null ; } } catch ( InterruptedException e ) { throw new InterruptedIOException ( "Failed to shutdown respons...
shutdown datastreamer and responseprocessor threads .
33,881
public void output ( K key , V value ) throws IOException { collector . collect ( key , value ) ; }
The task output a normal record .
33,882
public void partitionedOutput ( int reduce , K key , V value ) throws IOException { PipesPartitioner . setNextPartition ( reduce ) ; collector . collect ( key , value ) ; }
The task output a record with a partition number attached .
33,883
public void progress ( float progress ) throws IOException { progressValue = progress ; reporter . progress ( ) ; if ( recordReader != null ) { progressKey . set ( progress ) ; recordReader . next ( progressKey , nullValue ) ; } }
Update the amount done and call progress on the reporter .
33,884
public synchronized boolean waitForFinish ( ) throws Throwable { while ( ! done && exception == null ) { wait ( ) ; } if ( exception != null ) { throw exception ; } return done ; }
Wait for the task to finish or abort .
33,885
private Map < Integer , DiskScanInfo [ ] > getDiskReportPerNamespace ( ) { if ( dataset . volumes == null ) { LOG . warn ( "Dataset volumes are not initialized yet" ) ; return new HashMap < Integer , DiskScanInfo [ ] > ( ) ; } FSVolume [ ] volumes = dataset . volumes . getVolumes ( ) ; ScanInfoListPerNamespace [ ] volu...
Get lists of blocks on the disk sorted by blockId per namespace
33,886
void checkDifferenceAndReconcile ( ) { resetDiffsAndStats ( ) ; checkDifference ( ) ; for ( Entry < Integer , LinkedList < ScanDifference > > entry : diffsPerNamespace . entrySet ( ) ) { Integer namespaceId = entry . getKey ( ) ; LinkedList < ScanDifference > diff = entry . getValue ( ) ; for ( ScanDifference info : di...
Reconcile differences between disk and in - memory blocks
33,887
public static File createJarPackage ( IResource resource ) { JarModule jarModule = new JarModule ( resource ) ; try { PlatformUI . getWorkbench ( ) . getProgressService ( ) . run ( false , true , jarModule ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; return null ; } File jarFile = jarModule . getJarFile ( ) ...
Static way to create a JAR package for the given resource and showing a progress bar
33,888
DatanodeDescriptor [ ] getValidTargets ( ) { if ( targetGSs == null ) { return null ; } int count = 0 ; long lastBlockGS = this . getLastBlock ( ) . getGenerationStamp ( ) ; for ( long targetGS : targetGSs ) { if ( lastBlockGS == targetGS ) { count ++ ; } } if ( count == 0 ) { return null ; } if ( count == targets . le...
Return the targets with generation stamp matching that of the last block
33,889
/**
 * Sets the target replica locations, all sharing the same generation stamp.
 * A null location array clears the per-target generation stamps as well.
 *
 * @param locs            the datanodes holding the replicas, or null
 * @param generationStamp the generation stamp shared by every replica
 */
void setTargets(DatanodeDescriptor[] locs, long generationStamp) {
  setTargets(locs);
  if (locs == null) {
    targetGSs = null;
    return;
  }
  // One stamp per target; every entry carries the same value.
  // (Local renamed so it no longer shadows the targetGSs field.)
  final long[] stamps = new long[locs.length];
  for (int i = 0; i < stamps.length; i++) {
    stamps[i] = generationStamp;
  }
  this.targetGSs = stamps;
}
Set targets for list of replicas all sharing the same generationStamp
33,890
boolean addTarget ( DatanodeDescriptor node , long generationStamp ) { if ( this . targets == null ) { this . targets = new DatanodeDescriptor [ 0 ] ; } for ( int i = 0 ; i < targets . length ; i ++ ) { if ( targets [ i ] . equals ( node ) ) { if ( generationStamp != targetGSs [ i ] ) { targetGSs [ i ] = generationStam...
Add this target if it does not already exist . Returns true if the target was added .
33,891
void assignPrimaryDatanode ( ) { if ( targets . length == 0 ) { NameNode . stateChangeLog . warn ( "BLOCK*" + " INodeFileUnderConstruction.initLeaseRecovery:" + " No blocks found, lease removed." ) ; } int previous = primaryNodeIndex ; Block lastBlock = this . getLastBlock ( ) ; for ( int i = 1 ; i <= targets . length ...
Initialize lease recovery for this object
33,892
/**
 * Advances {@code lastRecoveryTime} to {@code now} if the lease recovery
 * period has expired since the previous recovery attempt.
 *
 * @param now the current time in milliseconds
 * @return true if the period had expired (and the timestamp was updated)
 */
synchronized boolean setLastRecoveryTime(long now) {
  final long elapsed = now - lastRecoveryTime;
  if (elapsed <= NameNode.LEASE_RECOVER_PERIOD) {
    return false;  // still inside the recovery period; timestamp unchanged
  }
  lastRecoveryTime = now;
  return true;
}
Update lastRecoveryTime if expired .
33,893
/**
 * When deleting an open (under-construction) file: first drops this inode's
 * target datanode list, then delegates the actual subtree block/inode
 * collection to the superclass implementation.
 *
 * @param v             list that receives the collected blocks
 * @param blocksLimit   maximum number of blocks to collect (per superclass contract)
 * @param removedINodes list that receives the removed inodes
 * @return the value returned by the superclass implementation
 */
int collectSubtreeBlocksAndClear ( List < BlockInfo > v , int blocksLimit , List < INode > removedINodes ) { clearTargets ( ) ; return super . collectSubtreeBlocksAndClear ( v , blocksLimit , removedINodes ) ; }
When deleting an open file we should remove it from the list of its targets .
33,894
/**
 * Unregisters this INodeFileUnderConstruction from every target datanode's
 * inode list. A null target array is a no-op.
 *
 * @param targets the datanodes to unregister from, or null
 */
private void removeINodeFromDatanodeDescriptors(DatanodeDescriptor[] targets) {
  if (targets == null) {
    return;  // no targets recorded; nothing to unregister
  }
  for (DatanodeDescriptor datanode : targets) {
    datanode.removeINode(this);
  }
}
Remove this INodeFileUnderConstruction from the list of datanodes .
33,895
/**
 * Registers this INodeFileUnderConstruction with every target datanode's
 * inode list. A null target array is a no-op.
 *
 * @param targets the datanodes to register with, or null
 */
private void addINodeToDatanodeDescriptors(DatanodeDescriptor[] targets) {
  if (targets == null) {
    return;  // no targets recorded; nothing to register
  }
  for (DatanodeDescriptor datanode : targets) {
    datanode.addINode(this);
  }
}
Add this INodeFileUnderConstruction to the list of datanodes .
33,896
/**
 * Emit everything from the collector: this combiner accepts every tuple
 * unconditionally, so all joined rows are emitted.
 * The assert only sanity-checks that the number of sources matches the
 * tuple's arity (active only when assertions are enabled).
 *
 * @param srcs the values contributed by each source
 * @param dst  the tuple being assembled
 * @return always true (accept the tuple)
 */
protected boolean combine ( Object [ ] srcs , TupleWritable dst ) { assert srcs . length == dst . size ( ) ; return true ; }
Emit everything from the collector .
33,897
public CoronaTaskTrackerProtocol getClient ( String host , int port ) throws IOException { String key = makeKey ( host , port ) ; Node ttNode = topologyCache . getNode ( host ) ; CoronaTaskTrackerProtocol client = null ; synchronized ( ttNode ) { client = trackerClients . get ( key ) ; if ( client == null ) { client = ...
API to get the RPC client .
33,898
private CoronaTaskTrackerProtocol createClient ( String host , int port ) throws IOException { String staticHost = NetUtils . getStaticResolution ( host ) ; InetSocketAddress s = null ; InetAddress inetAddress = null ; byte [ ] byteArr = null ; if ( staticHost != null ) { inetAddress = InetAddress . getByName ( staticH...
Connect to the task tracker and get the RPC client .
33,899
public void close ( ) throws IOException { LOG . info ( "Closing the shard writer, processed " + numForms + " forms" ) ; try { try { if ( maxNumSegments > 0 ) { writer . optimize ( maxNumSegments ) ; LOG . info ( "Optimized the shard into at most " + maxNumSegments + " segments" ) ; } } finally { writer . close ( ) ; L...
Close the shard writer . Optimize the Lucene instance of the shard before closing if necessary and copy the files created in the temp directory to the permanent directory after closing .