idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,900
/**
 * Restores the index directory to generation {@code startGen} by deleting every
 * Lucene segments_N file in {@code perm} whose generation is newer than
 * {@code startGen}, then deleting the segments.gen file if present.
 *
 * @param fs       file system holding the permanent index directory
 * @param perm     permanent index directory to clean up
 * @param startGen generation to roll back to; newer segments files are removed
 * @throws IOException on any file system failure
 */
private void restoreGeneration(FileSystem fs, Path perm, long startGen) throws IOException {
  // List only the segments_N files in the permanent directory.
  FileStatus[] fileStatus = fs.listStatus(perm, new PathFilter() {
    public boolean accept(Path path) {
      return LuceneUtil.isSegmentsFile(path.getName());
    }
  });
  // Remove every segments file from a generation later than startGen.
  for (int i = 0; i < fileStatus.length; i++) {
    Path path = fileStatus[i].getPath();
    if (startGen < LuceneUtil.generationFromSegmentsFileName(path.getName())) {
      fs.delete(path);
    }
  }
  // NOTE(review): this path is relative, not resolved against perm — confirm
  // the working directory makes it point at the intended segments.gen file.
  Path segmentsGenFile = new Path(LuceneUtil.IndexFileNames.SEGMENTS_GEN);
  if (fs.exists(segmentsGenFile)) {
    fs.delete(segmentsGenFile);
  }
}
once the unwanted segments_N files are deleted
33,901
/**
 * Moves all Lucene index files from the local temp directory to the permanent
 * directory. Ordinary files are copied first; the segments file and then the
 * segments.gen file are copied last, so a reader never sees a segments file
 * that references data files which have not arrived yet.
 * The temp directory is always deleted afterwards, even on failure.
 *
 * @throws IOException on any copy or listing failure
 */
private void moveFromTempToPerm() throws IOException {
  try {
    FileStatus[] fileStatus = localFs.listStatus(temp, LuceneIndexFileNameFilter.getFilter());
    Path segmentsPath = null;
    Path segmentsGenPath = null;
    // First pass: copy data files, remembering the (single) segments and
    // segments.gen files for later.
    for (int i = 0; i < fileStatus.length; i++) {
      Path path = fileStatus[i].getPath();
      String name = path.getName();
      if (LuceneUtil.isSegmentsGenFile(name)) {
        assert (segmentsGenPath == null);
        segmentsGenPath = path;
      } else if (LuceneUtil.isSegmentsFile(name)) {
        assert (segmentsPath == null);
        segmentsPath = path;
      } else {
        fs.completeLocalOutput(new Path(perm, name), path);
      }
    }
    // Copy segments_N, then segments.gen, only after all data files are in place.
    if (segmentsPath != null) {
      fs.completeLocalOutput(new Path(perm, segmentsPath.getName()), segmentsPath);
    }
    if (segmentsGenPath != null) {
      fs.completeLocalOutput(new Path(perm, segmentsGenPath.getName()), segmentsGenPath);
    }
  } finally {
    // Always clean up the local temp directory.
    localFs.delete(temp);
  }
}
and then delete the temp dir from the local FS
33,902
/**
 * Applies the configured permission change against the given file and returns
 * the resulting mode bits.
 *
 * @param file file whose current permission is the starting point
 * @return the new mode as a short
 */
public short applyNewPermission(FileStatus file) {
  // Current mode bits of the file.
  final int currentMode = file.getPermission().toShort();
  // Execute bits are acceptable for directories, or for files that already
  // carry at least one execute bit.
  final boolean allowExec = file.isDir() || (currentMode & 0111) != 0;
  return (short) combineModes(currentMode, allowExec);
}
Apply permission against specified file and determine what the new mode would be
33,903
/**
 * Stores {@code maxTxId} as the new maximum transaction id, but only if it is
 * greater than the currently stored value (the max never moves backwards).
 *
 * @param maxTxId candidate maximum transaction id
 * @throws IOException if reading or writing the stored value fails
 */
public synchronized void store(long maxTxId) throws IOException {
  // Only advance; a smaller or equal candidate is ignored.
  if (get() < maxTxId) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Resetting maxTxId to " + maxTxId);
    }
    set(maxTxId);
  }
}
Store the specified transaction id in the maxTxId ZNode if the specified maxTxId is greater than the existing maxTxId .
33,904
/**
 * Reads the current max transaction id from the ZooKeeper znode.
 *
 * @return the stored max transaction id, or -1 when the znode does not exist
 *         (error paths delegate to the keeperException/interruptedException
 *         helpers, which may themselves throw; the -1 returns after them are
 *         only reached if those helpers return normally)
 * @throws IOException on unrecoverable ZooKeeper errors (via the helpers)
 */
public synchronized long get() throws IOException {
  try {
    // exists() also refreshes lastZNodeStat for the subsequent getData call.
    lastZNodeStat = zooKeeper.exists(fullyQualifiedZNode, false);
    if (lastZNodeStat == null) {
      return -1;
    }
    byte[] data = zooKeeper.getData(fullyQualifiedZNode, false, lastZNodeStat);
    WritableUtil.readWritableFromByteArray(data, maxTxIdWritable);
    return maxTxIdWritable.get();
  } catch (KeeperException e) {
    keeperException("Unrecoverable ZooKeeper error reading " + fullyQualifiedZNode, e);
    return -1;
  } catch (InterruptedException e) {
    // NOTE(review): the interrupt status is not restored here — presumably the
    // interruptedException helper converts/handles it; confirm.
    interruptedException("Interrupted reading " + fullyQualifiedZNode, e);
    return -1;
  }
}
Get the current max transaction id from ZooKeeper
33,905
/**
 * Checks whether this is a federated DFS cluster (the federation nameservices
 * key is set and non-blank) and, if so, records the given service name.
 * In a federated cluster a nameservice id is mandatory.
 *
 * @param conf configuration to read the federation nameservices key from
 * @param info startup info carrying the requested service name
 * @throws ConfigurationException if the cluster is federated but no service
 *         name was supplied
 */
private void checkAndSetServiceName(Configuration conf, StartupInfo info) throws ConfigurationException {
  // Fixed misspelled local name; also corrected the misspelling in the
  // user-facing error message ("fedrated" -> "federated").
  String federationMode = conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES);
  String serviceName = info.serviceName;
  if (federationMode != null && !federationMode.trim().isEmpty()) {
    if (serviceName == null || serviceName.trim().isEmpty()) {
      throw new ConfigurationException(
          "This is a federated DFS cluster, nameservice id is required.");
    }
    this.serviceName = serviceName;
  }
  // Non-federated cluster: any supplied service name is intentionally ignored.
}
Check if this is a federated cluster and set the service name .
33,906
/**
 * Shuts down the Namespace Notifier server: sets the shutdown flag so the main
 * loop exits, stops the Thrift server if one is running, and clears the
 * started flag.
 */
public void shutdown() {
  LOG.info("Shutting down ...");
  // Signal the serving loop first.
  shouldShutdown = true;
  if (tserver != null) {
    tserver.stop();
  }
  started = false;
}
Called when the Namespace Notifier server should shutdown .
33,907
/**
 * Allocates a fresh client id, connects to the client at host:port, and
 * registers it in the internal structures. If the connection attempt fails
 * the exception is logged and rethrown without registering anything.
 *
 * @param host client host to connect to
 * @param port client port to connect to
 * @return the id assigned to the newly connected client
 * @throws TTransportException if the Thrift transport cannot be opened
 * @throws IOException if the connection fails at the I/O level
 */
public long addClientAndConnect(String host, int port) throws TTransportException, IOException {
  final long clientId = getNewClientId();
  LOG.info("Adding client with id=" + clientId + " host=" + host + " port=" + port + " and connecting ...");
  ClientHandler.Client handler;
  try {
    handler = getClientConnection(host, port);
    LOG.info("Succesfully connected to client " + clientId);
  } catch (IOException e1) {
    LOG.error("Failed to connect to client " + clientId, e1);
    throw e1;
  } catch (TTransportException e2) {
    LOG.error("Failed to connect to client " + clientId, e2);
    throw e2;
  }
  // Connection established: register the client.
  addClient(new ClientData(clientId, handler, host, port));
  LOG.info("Successfully added client " + clientId + " and connected.");
  return clientId;
}
Adds the client to the internal data structures and connects to it . If the method throws an exception then it is guaranteed it will also be removed from the internal structures before throwing the exception .
33,908
/**
 * Registers the client in the internal structures and hands it to the
 * dispatcher, then updates the registered-clients gauge.
 *
 * @param clientData the client to register (keyed by its id)
 */
public void addClient(ClientData clientData) {
  clientsData.put(clientData.id, clientData);
  dispatcher.assignClient(clientData.id);
  LOG.info("Succesfully added client " + clientData);
  // Gauge reflects the current number of registered clients.
  metrics.numRegisteredClients.set(clientsData.size());
}
Adds the client to the internal structures
33,909
/**
 * Removes a client from the internal data structures, including every event
 * subscription set it appears in, and updates the subscription and
 * registered-client gauges.
 *
 * @param clientId id of the client to remove
 * @return true if the client existed and was removed, false if it was unknown
 */
public boolean removeClient(long clientId) {
  ClientData clientData = clientsData.get(clientId);
  if (clientData == null) {
    return false;
  }
  dispatcher.removeClient(clientId);
  // Lock ordering: the global subscriptions lock first, then each individual
  // subscriber set while removing this client from it.
  synchronized (subscriptions) {
    for (Set<Long> subscribedSet : clientData.subscriptions) {
      synchronized (subscribedSet) {
        subscribedSet.remove(clientId);
      }
    }
  }
  // Decrease the global subscription count by this client's subscriptions.
  metrics.numTotalSubscriptions.set(
      numTotalSubscriptions.getAndAdd(-clientData.subscriptions.size()));
  clientsData.remove(clientId);
  LOG.info("Removed client " + clientData);
  metrics.numRegisteredClients.set(clientsData.size());
  return true;
}
Removes a client from the internal data structures . This also removes the client from all the events to which he subscribed .
33,910
/**
 * Returns the pending-notification queue for the given client.
 *
 * @param clientId id of the client
 * @return the client's notification queue, or null for an unknown client id
 */
public Queue<NamespaceNotification> getClientNotificationQueue(long clientId) {
  ClientData data = clientsData.get(clientId);
  if (data == null) {
    return null;
  }
  return data.queue;
}
Gets the queue of notifications that should be sent to a client . It is important to send the notifications in this queue first before sending any other notification or the order will be affected .
33,911
/**
 * Queues, onto the client's own queue, all notifications for the given event
 * starting from transaction id {@code txId}. A txId of -1 means nothing to
 * replay and returns immediately. The queued notifications are sent
 * asynchronously after this method returns.
 *
 * @param clientId target client
 * @param event    namespace event (path + type) to replay notifications for
 * @param txId     transaction id to start from, or -1 for none
 * @throws TransactionIdTooOldException if history no longer covers txId
 * @throws InvalidClientIdException     if the client id is unknown
 */
private void queueNotifications(long clientId, NamespaceEvent event, long txId)
    throws TransactionIdTooOldException, InvalidClientIdException {
  if (txId == -1) {
    return;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Queueing notifications for client " + clientId + " from txId " + txId
        + " at [" + event.path + ", " + EventType.fromByteValue(event.type) + "] ...");
  }
  ClientData clientData = clientsData.get(clientId);
  if (clientData == null) {
    LOG.error("Missing the client data for client id: " + clientId);
    throw new InvalidClientIdException("Missing the client data");
  }
  // History fills the client's queue directly.
  serverHistory.addNotificationsToQueue(event, txId, clientData.queue);
}
Queues the notification for a client . The queued notifications will be sent asynchronously after this method returns to the specified client .
33,912
/**
 * Generates a new non-negative client id that is not already in use by a
 * registered client.
 *
 * @return a fresh, unused, non-negative client id
 */
private long getNewClientId() {
  while (true) {
    // Mask off the sign bit instead of Math.abs(): Math.abs(Long.MIN_VALUE)
    // overflows and stays negative, which could have produced a negative id.
    long clientId = clientIdsGenerator.nextLong() & Long.MAX_VALUE;
    if (!clientsData.containsKey(clientId)) {
      return clientId;
    }
  }
}
Generates a new client id which is not present in the current set of ids for the clients which are subscribed to this server .
33,913
/**
 * Saves the checksum for a raided block unless one is already stored; when a
 * previous checksum exists and differs from the new one, an IOException is
 * thrown.
 *
 * @param blk         the block whose checksum is being recorded
 * @param newChecksum checksum to store
 * @return the previously stored checksum, or null if none existed
 * @throws IOException if a different checksum was already stored
 */
public Long putIfAbsentChecksum(Block blk, Long newChecksum) throws IOException {
  // Non-null result means a checksum was already present for this block.
  final Long previous = putIfAbsent(blk, newChecksum);
  if (previous != null && !previous.equals(newChecksum)) {
    throw new IOException("Block " + blk.toString() + " has different checksums "
        + previous + "(old) and " + newChecksum + "(new)");
  }
  return previous;
}
Save the checksum for a raided block into store and compare the old value with new value if different throw an exception
33,914
/**
 * Fills the join collector for the current key using only the "rightmost"
 * (highest-id) record reader positioned at that key; all other readers at the
 * same key are skipped past it. This avoids materializing the cross product
 * of the discarded streams for this key.
 *
 * @param iterkey reusable key object, set to the head of the reader queue
 * @throws IOException on reader failure
 */
protected void fillJoinCollector(K iterkey) throws IOException {
  final PriorityQueue<ComposableRecordReader<K, ?>> q = getRecordReaderQueue();
  if (!q.isEmpty()) {
    int highpos = -1;
    ArrayList<ComposableRecordReader<K, ?>> list =
        new ArrayList<ComposableRecordReader<K, ?>>(kids.length);
    // Copy the smallest key into iterkey.
    q.peek().key(iterkey);
    final WritableComparator cmp = getComparator();
    // Drain every reader whose current key equals iterkey, tracking the one
    // with the highest id (highpos indexes into list).
    while (0 == cmp.compare(q.peek().key(), iterkey)) {
      ComposableRecordReader<K, ?> t = q.poll();
      if (-1 == highpos || list.get(highpos).id() < t.id()) {
        highpos = list.size();
      }
      list.add(t);
      if (q.isEmpty())
        break;
    }
    // The highest-id reader feeds the collector; the rest just skip this key.
    ComposableRecordReader<K, ?> t = list.remove(highpos);
    t.accept(jc, iterkey);
    for (ComposableRecordReader<K, ?> rr : list) {
      rr.skip(iterkey);
    }
    list.add(t);
    // Re-queue every reader that still has records.
    for (ComposableRecordReader<K, ?> rr : list) {
      if (rr.hasNext()) {
        q.add(rr);
      }
    }
  }
}
Instead of filling the JoinCollector with iterators from all data sources fill only the rightmost for this key . This not only saves space by discarding the other sources but it also emits the number of key - value pairs in the preferred RecordReader instead of repeating that stream n times where n is the cardinality of the cross product of the discarded streams for the given key .
33,915
/**
 * Finds the separate-checksum metadata file for the given block file among the
 * directory entries and returns the generation stamp parsed from its name.
 * Expected name shape (by the parsing below): three '_'-separated parts with
 * the stamp in the third part, before a single '.' extension.
 *
 * @param listdir   names of the files in the block's directory
 * @param blockName block file name prefix to match
 * @return the parsed generation stamp, or Block.GRANDFATHER_GENERATION_STAMP
 *         when no matching metafile exists
 */
static long getGenerationStampFromSeperateChecksumFile(String[] listdir, String blockName) {
  for (int j = 0; j < listdir.length; j++) {
    String path = listdir[j];
    if (!path.startsWith(blockName)) {
      continue;
    }
    // Must look like <a>_<b>_<stamp>.<ext>.
    String[] vals = StringUtils.split(path, '_');
    if (vals.length != 3) {
      continue;
    }
    String[] str = StringUtils.split(vals[2], '.');
    if (str.length != 2) {
      continue;
    }
    // NOTE(review): Long.parseLong throws NumberFormatException on a
    // non-numeric stamp — confirm upstream naming guarantees digits here.
    return Long.parseLong(str[0]);
  }
  DataNode.LOG.warn("Block " + blockName + " does not have a metafile!");
  return Block.GRANDFATHER_GENERATION_STAMP;
}
Find the metadata file for the specified block file . Return the generation stamp from the name of the metafile .
33,916
/**
 * Parses the generation stamp out of a meta file name, given the matching
 * block file. The stamp is the substring between the block name (plus one
 * separator character) and the metadata extension.
 *
 * @param blockFile the block data file
 * @param metaFile  the corresponding metadata file
 * @return the generation stamp embedded in the meta file name
 * @throws IOException if the stamp portion is not a valid long
 */
static long parseGenerationStampInMetaFile(File blockFile, File metaFile) throws IOException {
  final String metaname = metaFile.getName();
  // Stamp occupies [blockName length + 1, end - extension length).
  final int begin = blockFile.getName().length() + 1;
  final int end = metaname.length() - FSDataset.METADATA_EXTENSION.length();
  final String gs = metaname.substring(begin, end);
  try {
    return Long.parseLong(gs);
  } catch (NumberFormatException nfe) {
    // Wrap with context, preserving the original cause.
    throw (IOException) new IOException(
        "blockFile=" + blockFile + ", metaFile=" + metaFile).initCause(nfe);
  }
}
Find generation stamp from block file and meta file .
33,917
/**
 * Returns whether the metadata file for the given block exists.
 *
 * @param dataset     dataset used to locate the meta file
 * @param namespaceId namespace the block belongs to
 * @param b           the block to check
 * @return true if the meta file exists
 * @throws IOException if the meta file cannot be located
 */
static public boolean metaFileExists(FSDatasetInterface dataset, int namespaceId, Block b) throws IOException {
  return getMetaFile(dataset, namespaceId, b).exists();
}
Does the meta file exist for this block?
33,918
/**
 * Wraps the given input stream in a BufferedByteInputStream (the only way to
 * instantiate it), then layers a BufferedInputStream and a DataInputStream on
 * top.
 *
 * @param is             stream to wrap
 * @param bufferSize     size of the internal byte buffer
 * @param readBufferSize size of the read buffer
 * @return the fully wrapped DataInputStream
 */
public static DataInputStream wrapInputStream(InputStream is, int bufferSize, int readBufferSize) {
  InputStream buffered = new BufferedByteInputStream(is, bufferSize, readBufferSize);
  return new DataInputStream(new BufferedInputStream(buffered));
}
Wrap given input stream with BufferedByteInputOutput . This is the only way to instantiate the buffered input stream .
33,919
/**
 * Closes the stream: closes the read thread (and with it the underlying
 * input stream) and waits for the thread to finish.
 *
 * @throws IOException if the wait is interrupted (the thread's interrupt
 *         status is restored before the exception is thrown)
 */
public void close() throws IOException {
  readThread.close();
  try {
    readThread.join();
  } catch (InterruptedException e) {
    // Restore the caller's interrupt status before converting the exception;
    // previously the interrupt was silently swallowed.
    Thread.currentThread().interrupt();
    throw new IOException(e);
  }
}
Close the input stream . Joins the thread and closes the underlying input stream . Can be called multiple times .
33,920
/**
 * Checks the result of a read: a non-negative count is passed through; on EOF
 * (-1) verifies that the stream is still healthy, throwing if it was closed or
 * if the read thread recorded an error.
 *
 * @param readBytes raw result of the read
 * @return readBytes unchanged when no error condition applies
 * @throws IOException if the stream is closed or the read thread failed
 */
private int checkOutput(int readBytes) throws IOException {
  if (readBytes > -1) {
    return readBytes;
  }
  if (closed) {
    throw new IOException("The stream has been closed");
  }
  if (readThread.error != null) {
    // Preserve the original failure as the cause instead of flattening it
    // to just its message (which lost the stack trace).
    throw new IOException(readThread.error.getMessage(), readThread.error);
  }
  return readBytes;
}
Check if the read thread died if so throw an exception .
33,921
/**
 * Walks the TaskStatus objects of every running job and associates each with
 * the name of the task tracker it is (or was) running on, while accumulating
 * the total map and reduce task counts.
 *
 * @param jobsInProgress the jobs currently known to the cluster
 */
private void initTrackersToTasksMap(Collection<JobInProgress> jobsInProgress) {
  // Seed an empty task list for every known tracker.
  for (TaskTrackerStatus tracker : taskTrackersDetails) {
    taskTrackerExtendedTasks.put(tracker.getTrackerName(), new ArrayList<TaskStatus>());
  }
  for (JobInProgress job : jobsInProgress) {
    total_map_tasks += job.getTasks(TaskType.MAP).length;
    total_reduce_tasks += job.getTasks(TaskType.REDUCE).length;
    // Same association logic for both task types (reduce first, as before);
    // the duplicated loop bodies are now a single helper.
    associateTasksWithTrackers(job.getTasks(TaskType.REDUCE));
    associateTasksWithTrackers(job.getTasks(TaskType.MAP));
  }
}

/** Adds every TaskStatus of the given tasks to its tracker's task list. */
private void associateTasksWithTrackers(TaskInProgress[] tasks) {
  for (TaskInProgress task : tasks) {
    for (TaskStatus status : task.getTaskStatuses()) {
      Collection<TaskStatus> trackerTasks =
          taskTrackerExtendedTasks.get(status.getTaskTracker());
      if (trackerTasks == null) {
        // Tracker not seeded above (e.g. a departed tracker) — create lazily.
        trackerTasks = new ArrayList<TaskStatus>();
        taskTrackerExtendedTasks.put(status.getTaskTracker(), trackerTasks);
      }
      trackerTasks.add(status);
    }
  }
}
Goes through the list of TaskStatus objects for each of the running jobs on the cluster and associates them with the name of the task tracker they are or were running on .
33,922
/**
 * Returns the path to a previously created local map output file.
 *
 * @param mapTaskId the map task that produced the file
 * @return readable local path of the map output file
 * @throws IOException if the file cannot be located
 */
public Path getOutputFile(TaskAttemptID mapTaskId) throws IOException {
  // Relative location of the output file inside the intermediate output dir.
  String relPath = TaskTracker.getIntermediateOutputDir(
      jobId.toString(), mapTaskId.toString()) + "/file.out";
  return lDirAlloc.getLocalPathToRead(relPath, conf);
}
Return the path to local map output file created earlier
33,923
/**
 * Allocates a writable local path for a map spill file.
 *
 * @param mapTaskId   the map task producing the spill
 * @param spillNumber index of the spill
 * @param size        expected size, used for disk allocation
 * @return writable local path for the spill file
 * @throws IOException if no local directory can hold the file
 */
public Path getSpillFileForWrite(TaskAttemptID mapTaskId, int spillNumber, long size) throws IOException {
  String relPath = TaskTracker.getIntermediateOutputDir(
      jobId.toString(), mapTaskId.toString()) + "/spill" + spillNumber + ".out";
  return lDirAlloc.getLocalPathForWrite(relPath, size, conf);
}
Create a local map spill file name .
33,924
/**
 * Returns the path to a previously created local map spill index file.
 *
 * @param mapTaskId   the map task that produced the spill
 * @param spillNumber index of the spill
 * @return readable local path of the spill index file
 * @throws IOException if the file cannot be located
 */
public Path getSpillIndexFile(TaskAttemptID mapTaskId, int spillNumber) throws IOException {
  String relPath = TaskTracker.getIntermediateOutputDir(
      jobId.toString(), mapTaskId.toString()) + "/spill" + spillNumber + ".out.index";
  return lDirAlloc.getLocalPathToRead(relPath, conf);
}
Return a local map spill index file created earlier
33,925
/**
 * Returns the path to a previously created local reduce input file.
 *
 * @param mapId        id of the map that produced the input
 * @param reduceTaskId the reduce task consuming it
 * @return readable local path of the reduce input file
 * @throws IOException if the file cannot be located
 */
public Path getInputFile(int mapId, TaskAttemptID reduceTaskId) throws IOException {
  String relPath = TaskTracker.getIntermediateOutputDir(
      jobId.toString(), reduceTaskId.toString()) + "/map_" + mapId + ".out";
  return lDirAlloc.getLocalPathToRead(relPath, conf);
}
Return a local reduce input file created earlier
33,926
/**
 * Allocates a writable local path for a reduce input file.
 *
 * @param mapId        the map whose output becomes this input
 * @param reduceTaskId the reduce task that will consume it
 * @param size         expected size, used for disk allocation
 * @return writable local path for the reduce input file
 * @throws IOException if no local directory can hold the file
 */
public Path getInputFileForWrite(TaskID mapId, TaskAttemptID reduceTaskId, long size) throws IOException {
  String relPath = TaskTracker.getIntermediateOutputDir(
      jobId.toString(), reduceTaskId.toString()) + "/map_" + mapId.getId() + ".out";
  return lDirAlloc.getLocalPathForWrite(relPath, size, conf);
}
Create a local reduce input file name .
33,927
/**
 * Removes all local files related to a task: via the async disk service when
 * one is configured (move-then-delete), otherwise synchronously through the
 * configuration's local-file deletion.
 *
 * @param taskId the task whose intermediate output is removed
 * @throws IOException on deletion failure
 */
public void removeAll(TaskAttemptID taskId) throws IOException {
  String toBeDeleted = TaskTracker.getIntermediateOutputDir(
      jobId.toString(), taskId.toString());
  if (asyncDiskService != null) {
    asyncDiskService.moveAndDeleteFromEachVolume(toBeDeleted);
    LOG.info("Move and then delete map ouput " + toBeDeleted + " for task " + taskId);
  } else {
    LOG.info("Delete map ouput " + toBeDeleted + " for task " + taskId);
    conf.deleteLocalFiles(toBeDeleted);
  }
}
Removes all of the files related to a task .
33,928
/**
 * For a file under construction, grows the size of the last block and updates
 * the overall file length by the same delta. Only applies when the new size is
 * larger than the current last-block size.
 *
 * @param blockId   expected id of the last block (asserted)
 * @param blockSize new size of the last block
 */
public synchronized void setLastBlockSize(long blockId, long blockSize) {
  assert blocks.size() > 0;
  LocatedBlock last = blocks.get(blocks.size() - 1);
  // Only grow, and only while the file is still being written.
  if (underConstruction && blockSize > last.getBlockSize()) {
    assert blockId == last.getBlock().getBlockId();
    // File length grows by exactly the block-size delta.
    this.setFileLength(this.getFileLength() + blockSize - last.getBlockSize());
    last.setBlockSize(blockSize);
    if (LOG.isDebugEnabled()) {
      LOG.debug("DFSClient setting last block " + last + " to length " + blockSize
          + " filesize is now " + getFileLength());
    }
  }
}
If file is under construction set block size of the last block . It updates file length in the same time .
33,929
/**
 * Memory-maps the relevant portion of the file read-only and returns it,
 * avoiding a copy from OS pages into this process's heap. The mapping is
 * released when the returned buffer goes out of scope.
 *
 * @return a read-only mapped buffer over [startOffset, startOffset + length)
 * @throws IOException if the mapping fails
 */
public ByteBuffer readAll() throws IOException {
  return dataFileChannel.map(FileChannel.MapMode.READ_ONLY, startOffset, length);
}
Maps in the relevant portion of the file . This avoids copying the data from OS pages into this process's pages . It will be automatically unmapped when the ByteBuffer that is returned here goes out of scope . This method is currently invoked only by the FSDataInputStream ScatterGather api .
33,930
/**
 * Builds a TFactoryBasedThreadPoolServer around the given processor and server
 * socket, using a framed transport, binary protocol, and daemon threads.
 * Centralizing construction here means server-creation changes happen in one
 * place.
 *
 * @param processor     Thrift processor to serve
 * @param serverSocket  pre-bound server socket
 * @param socketTimeOut socket timeout in milliseconds
 * @return the configured server instance
 * @throws IOException if the transport cannot be created
 */
public static TFactoryBasedThreadPoolServer createNewServer(TProcessor processor,
    ServerSocket serverSocket, int socketTimeOut) throws IOException {
  TServerSocket transport = new TServerSocket(serverSocket, socketTimeOut);
  TFactoryBasedThreadPoolServer.Args serverArgs =
      new TFactoryBasedThreadPoolServer.Args(transport);
  serverArgs.stopTimeoutVal = 0;
  serverArgs.processor(processor);
  serverArgs.transportFactory(new TFramedTransport.Factory());
  serverArgs.protocolFactory(new TBinaryProtocol.Factory(true, true));
  return new TFactoryBasedThreadPoolServer(
      serverArgs, new TFactoryBasedThreadPoolServer.DaemonThreadFactory());
}
This is a helper method which creates a TFactoryBased .. Server object using the processor ServerSocket object and socket timeout limit . This is useful when we change the mechanism of server object creation . As a result we don t have to change code in multiple places .
33,931
/**
 * Creates an empty sequence file for every map output that does not already
 * exist, so the reduce task can open all of its expected inputs.
 *
 * @param fs      file system holding the map outputs
 * @param taskId  the (reduce) task attempt whose inputs are being filled in
 * @param numMaps number of map outputs expected
 * @param conf    job configuration (provides key/value classes)
 * @throws IOException on file system failure
 */
private static void fillInMissingMapOutputs(FileSystem fs, TaskAttemptID taskId,
    int numMaps, JobConf conf) throws IOException {
  Class<? extends WritableComparable> keyClass =
      conf.getMapOutputKeyClass().asSubclass(WritableComparable.class);
  Class<? extends Writable> valueClass =
      conf.getMapOutputValueClass().asSubclass(Writable.class);
  MapOutputFile namer = new MapOutputFile(taskId.getJobID());
  namer.setConf(conf);
  for (int i = 0; i < numMaps; i++) {
    Path f = namer.getInputFile(i, taskId);
    if (!fs.exists(f)) {
      LOG.info("Create missing input: " + f);
      // Open-then-close leaves a valid, empty sequence file behind.
      SequenceFile.Writer out = SequenceFile.createWriter(fs, conf, f, keyClass, valueClass);
      out.close();
    }
  }
}
Create empty sequence files for any of the map outputs that we don t have .
33,932
/**
 * Runs a single task in isolation, driven by the job.xml given as the only
 * argument: sets up the working directory and class loader, reconstructs
 * either a MapTask (reading its serialized split from split.dta next to the
 * job file) or a ReduceTask (creating empty inputs for missing map outputs),
 * and runs it against a FakeUmbilical.
 *
 * @param args exactly one argument: the path to job.xml
 */
public static void main(String[] args) throws ClassNotFoundException, IOException, InterruptedException {
  if (args.length != 1) {
    System.out.println("Usage: IsolationRunner <path>/job.xml");
    System.exit(1);
  }
  File jobFilename = new File(args[0]);
  if (!jobFilename.exists() || !jobFilename.isFile()) {
    System.out.println(jobFilename + " is not a valid job file.");
    System.exit(1);
  }
  JobConf conf = new JobConf(new Path(jobFilename.toString()));
  // Task identity and kind come from the job configuration.
  TaskAttemptID taskId = TaskAttemptID.forName(conf.get("mapred.task.id"));
  boolean isMap = conf.getBoolean("mapred.task.is.map", true);
  int partition = conf.getInt("mapred.task.partition", 0);
  FileSystem local = FileSystem.getLocal(conf);
  LocalDirAllocator lDirAlloc = new LocalDirAllocator("mapred.local.dir");
  // Locate the task's local "work" directory and make it current, both
  // locally and on the default file system.
  File workDirName = new File(lDirAlloc.getLocalPathToRead(
      TaskTracker.getLocalTaskDir(taskId.getJobID().toString(), taskId.toString())
      + Path.SEPARATOR + "work", conf).toString());
  local.setWorkingDirectory(new Path(workDirName.toString()));
  FileSystem.get(conf).setWorkingDirectory(conf.getWorkingDirectory());
  // Job classes are loaded through a dedicated class loader.
  ClassLoader classLoader = makeClassLoader(conf, workDirName);
  Thread.currentThread().setContextClassLoader(classLoader);
  conf.setClassLoader(classLoader);
  Task task;
  if (isMap) {
    // Deserialize the input split stored beside the job file.
    Path localSplit = new Path(new Path(jobFilename.toString()).getParent(), "split.dta");
    DataInputStream splitFile = FileSystem.getLocal(conf).open(localSplit);
    String splitClass = Text.readString(splitFile);
    BytesWritable split = new BytesWritable();
    split.readFields(splitFile);
    splitFile.close();
    task = new MapTask(jobFilename.toString(), taskId, partition, splitClass, split, 1, conf.getUser());
  } else {
    int numMaps = conf.getNumMapTasks();
    // A reduce needs all map outputs present; create empty ones when missing.
    fillInMissingMapOutputs(local, taskId, numMaps, conf);
    task = new ReduceTask(jobFilename.toString(), taskId, partition, numMaps, 1, conf.getUser());
  }
  task.setConf(conf);
  task.run(conf, new FakeUmbilical());
}
Run a single task
33,933
/**
 * Uploads the given local file or directory into this DFS folder. Directories
 * are created on DFS and recursed into (honoring monitor cancellation);
 * regular files are handed to the DFSFile constructor.
 *
 * @param monitor progress monitor; checked for cancellation during recursion
 * @param file    local file or directory to upload
 * @throws IOException on DFS failure
 */
public void upload(IProgressMonitor monitor, final File file) throws IOException {
  if (file.isDirectory()) {
    Path filePath = new Path(this.path, file.getName());
    getDFS().mkdirs(filePath);
    DFSFolder newFolder = new DFSFolder(this, filePath);
    monitor.worked(1);
    for (File child : file.listFiles()) {
      if (monitor.isCanceled()) return;
      newFolder.upload(monitor, child);
    }
  } else if (file.isFile()) {
    Path filePath = new Path(this.path, file.getName());
    // NOTE(review): result is unused — presumably the DFSFile constructor
    // performs the actual upload as a side effect; confirm before removing.
    DFSFile newFile = new DFSFile(this, filePath, file, monitor);
  } else {
    // Neither a regular file nor a directory — intentionally ignored.
  }
}
Upload the given file or directory into this DfsFolder
33,934
/**
 * Creates a new subdirectory inside this directory and refreshes the view.
 * Failures are printed and otherwise swallowed (best effort).
 *
 * @param folderName name of the subdirectory to create
 */
public void mkdir(String folderName) {
  try {
    getDFS().mkdirs(new Path(this.path, folderName));
  } catch (IOException ioe) {
    // NOTE(review): stack trace to stderr, no logger — matches surrounding
    // UI code style, but consider routing through a logger.
    ioe.printStackTrace();
  }
  // Refresh regardless of success so the UI reflects current state.
  doRefresh();
}
Create a new sub directory into this directory
33,935
/**
 * Adds a file to the pending replications list.
 *
 * @param filename file to register
 * @return true when the file was added for the first time, false when it was
 *         already pending
 */
boolean add(Path filename) {
  synchronized (pendingReplications) {
    // Already pending? Nothing to do.
    if (pendingReplications.get(filename) != null) {
      return false;
    }
    pendingReplications.put(filename, new PendingInfo(filename));
    return true;
  }
}
Add a block to the list of pending Replications . Returns true if the filename is added for the first time .
33,936
/**
 * Returns the network-topology Node for the given hostname, resolving and
 * caching it on first use; also records the node's ancestor at the default
 * host level in nodesAtMaxLevel.
 *
 * @param name hostname to look up
 * @return the corresponding topology node
 */
public Node getNode(String name) {
  Node n = hostnameToNodeMap.get(name);
  if (n == null) {
    // NOTE(review): check-then-act on the map without visible locking —
    // confirm callers serialize access or the map is concurrent.
    n = resolveAndGetNode(name);
    hostnameToNodeMap.put(name, n);
    nodesAtMaxLevel.add(getParentNode(n, NetworkTopology.DEFAULT_HOST_LEVEL - 1));
  }
  return n;
}
Return the Node in the network topology that corresponds to the hostname
33,937
/**
 * Opens the output generated by this format: one SequenceFile.Reader per part
 * file in the directory, in sorted (deterministic) name order.
 *
 * @param conf configuration used to open the files
 * @param dir  directory containing the part files
 * @return readers over every file in the directory, sorted by path
 * @throws IOException on listing or open failure
 */
public static SequenceFile.Reader[] getReaders(Configuration conf, Path dir) throws IOException {
  FileSystem fs = dir.getFileSystem(conf);
  Path[] names = FileUtil.stat2Paths(fs.listStatus(dir));
  // Sort for a deterministic part ordering.
  Arrays.sort(names);
  SequenceFile.Reader[] readers = new SequenceFile.Reader[names.length];
  int idx = 0;
  for (Path name : names) {
    readers[idx++] = new SequenceFile.Reader(fs, name, conf);
  }
  return readers;
}
Open the output generated by this format .
33,938
/**
 * Checks whether a tracker is faulty: either dead, or with more failed
 * connections / failed tasks than the configured maxima.
 *
 * @param trackerName tracker to check
 * @return true if the tracker is considered faulty
 */
public boolean isFaulty(String trackerName) {
  synchronized (this) {
    if (isDeadTracker(trackerName)) {
      return true;
    }
    NodeUsageReport report = usageReports.get(trackerName);
    if (report == null) {
      // No report recorded: not (yet) faulty.
      return false;
    }
    return report.getNumFailedConnections() > maxFailedConnections
        || report.getNumFailed() > maxFailures;
  }
}
Check if a tracker is faulty .
33,939
/**
 * Counts how many known trackers are currently faulty.
 *
 * @return number of faulty trackers
 */
public int getNumFaultyTrackers() {
  synchronized (this) {
    int faulty = 0;
    // isFaulty re-enters the same monitor, which is fine (reentrant).
    for (String tracker : usageReports.keySet()) {
      if (isFaulty(tracker)) {
        faulty++;
      }
    }
    return faulty;
  }
}
Get the number of faulty trackers .
33,940
/**
 * Increments the total-task counter on the given tracker's usage report.
 *
 * @param trackerName tracker that was assigned a task
 */
public void recordTask(String trackerName) {
  synchronized (this) {
    NodeUsageReport report = getReportUnprotected(trackerName);
    report.setNumTotalTasks(1 + report.getNumTotalTasks());
  }
}
Increment the number of tasks assigned to a tracker .
33,941
/**
 * Increments the succeeded-task counter on the given tracker's usage report.
 *
 * @param trackerName tracker where the task succeeded
 */
public void recordSucceededTask(String trackerName) {
  synchronized (this) {
    NodeUsageReport report = getReportUnprotected(trackerName);
    report.setNumSucceeded(1 + report.getNumSucceeded());
  }
}
Increment the number of succeeded tasks on a tracker .
33,942
/**
 * Increments the killed-task counter on the given tracker's usage report.
 *
 * @param trackerName tracker where the task was killed
 */
public void recordKilledTask(String trackerName) {
  synchronized (this) {
    NodeUsageReport report = getReportUnprotected(trackerName);
    report.setNumKilled(1 + report.getNumKilled());
  }
}
Increment the number of killed tasks on a tracker .
33,943
/**
 * Increments the failed-task counter on the given tracker's usage report.
 *
 * @param trackerName tracker where the task failed
 */
public void recordFailedTask(String trackerName) {
  synchronized (this) {
    NodeUsageReport report = getReportUnprotected(trackerName);
    report.setNumFailed(1 + report.getNumFailed());
  }
}
Increment the number of failed tasks on a tracker .
33,944
/**
 * Increments the slow-task counter on the given tracker's usage report.
 *
 * @param trackerName tracker where a task ran slowly
 */
public void recordSlowTask(String trackerName) {
  synchronized (this) {
    NodeUsageReport report = getReportUnprotected(trackerName);
    report.setNumSlow(1 + report.getNumSlow());
  }
}
Increment the number of tasks that ran slowly on a tracker .
33,945
/**
 * Increments the failed-connection counter on the given tracker's usage
 * report.
 *
 * @param trackerName tracker that had a connection error
 */
public void recordConnectionError(String trackerName) {
  synchronized (this) {
    NodeUsageReport report = getReportUnprotected(trackerName);
    report.setNumFailedConnections(1 + report.getNumFailedConnections());
  }
}
Increment the number of connection errors encountered on a tracker .
33,946
/**
 * Returns the usage report for a tracker, creating an all-zero report on
 * first access. As the name indicates, this does no locking itself — all
 * visible callers invoke it inside synchronized(this).
 *
 * @param trackerName tracker to fetch or create a report for
 * @return the (possibly new) usage report
 */
private NodeUsageReport getReportUnprotected(String trackerName) {
  NodeUsageReport usageReport = usageReports.get(trackerName);
  if (usageReport == null) {
    usageReport = new NodeUsageReport(trackerName, 0, 0, 0, 0, 0, 0, 0);
    usageReports.put(trackerName, usageReport);
  }
  return usageReport;
}
Get the usage report for a tracker .
33,947
/**
 * Increments the metrics for {@code numOps} operations that together took
 * {@code time}: adds to the running totals and feeds the per-operation
 * average time into the min/max tracker.
 *
 * NOTE(review): divides by numOps — calling with numOps == 0 throws
 * ArithmeticException; confirm callers always pass a positive count.
 *
 * @param numOps number of operations being recorded
 * @param time   total time taken by those operations
 */
public void inc(final int numOps, final long time) {
  lock.lock();
  try {
    currentData.numOperations += numOps;
    currentData.time += time;
    // Integer division: average time per operation.
    long timePerOps = time / numOps;
    minMax.update(timePerOps);
  } finally {
    lock.unlock();
  }
}
Increment the metrics for numOps operations
33,948
/**
 * Downgrades a new-API TaskAttemptID to the old type, reusing the instance
 * when it already is one and otherwise rebuilding from its components.
 *
 * @param old the new-API id to downgrade
 * @return an equivalent old-API TaskAttemptID
 */
public static TaskAttemptID downgrade(org.apache.hadoop.mapreduce.TaskAttemptID old) {
  return (old instanceof TaskAttemptID)
      ? (TaskAttemptID) old
      : new TaskAttemptID(TaskID.downgrade(old.getTaskID()), old.getId());
}
Downgrade a new TaskAttemptID to an old one
33,949
/**
 * Computes the on-disk file length for a block of the given size in the
 * inline-checksum layout: data plus one checksum per (possibly partial)
 * chunk plus the header.
 *
 * @param blockSize        number of data bytes in the block
 * @param bytesPerChecksum data bytes covered by one checksum
 * @param checksumSize     size of one checksum
 * @return total file length including header and checksums
 */
public static long getFileLengthFromBlockSize(long blockSize, int bytesPerChecksum, int checksumSize) {
  // Ceiling division: a trailing partial chunk still needs a checksum.
  long numChunks = (blockSize + bytesPerChecksum - 1) / bytesPerChecksum;
  return blockSize + numChunks * checksumSize + BlockInlineChecksumReader.getHeaderSize();
}
get file length for the block size .
33,950
/**
 * Translates an offset within the block's data to the corresponding position
 * in the on-disk file (header + data + interleaved checksums), by treating the
 * offset as a block size.
 *
 * @param offsetInBlock    data offset; must be chunk-aligned (asserted)
 * @param bytesPerChecksum data bytes covered by one checksum
 * @param checksumSize     size of one checksum
 * @return position in the file
 */
public static long getPosFromBlockOffset(long offsetInBlock, int bytesPerChecksum, int checksumSize) {
  assert offsetInBlock % bytesPerChecksum == 0;
  return getFileLengthFromBlockSize(offsetInBlock, bytesPerChecksum, checksumSize);
}
Translate from block offset to position in file .
33,951
/**
 * Submits the task to the thread pool registered for the given volume root.
 *
 * @param root volume root whose executor should run the task
 * @param task work to execute asynchronously
 * @throws RuntimeException if the service is shut down or the root is unknown
 */
synchronized void execute(File root, Runnable task) {
  if (executors == null) {
    throw new RuntimeException("AsyncDiskService is already shutdown");
  }
  ThreadPoolExecutor pool = executors.get(root);
  if (pool == null) {
    throw new RuntimeException("Cannot find root " + root + " for execution of task " + task);
  }
  pool.execute(task);
}
Execute the task sometime in the future using ThreadPools .
33,952
/**
 * Gracefully shuts down every per-volume thread pool (letting queued deletion
 * tasks finish) and marks the service as shut down. Calling again after
 * shutdown only logs a warning.
 */
synchronized void shutdown() {
  if (executors == null) {
    LOG.warn("AsyncDiskService has already shut down.");
    return;
  }
  LOG.info("Shutting down all async disk service threads...");
  for (Map.Entry<File, ThreadPoolExecutor> entry : executors.entrySet()) {
    entry.getValue().shutdown();
  }
  // Null marks the service as shut down for execute()/shutdown().
  executors = null;
  LOG.info("All async disk service threads have been shut down.");
}
Gracefully shut down all ThreadPool . Will wait for all deletion tasks to finish .
33,953
/**
 * Schedules asynchronous deletion of a block's data and meta files on the
 * volume that owns them.
 *
 * @param volume      volume holding the files
 * @param blockFile   block data file to delete
 * @param metaFile    block metadata file to delete
 * @param blockName   block name, for logging
 * @param namespaceId namespace the block belongs to
 */
void deleteAsync(FSDataset.FSVolume volume, File blockFile, File metaFile,
    String blockName, int namespaceId) {
  DataNode.LOG.info("Scheduling block " + blockName + " file " + blockFile + " for deletion");
  execute(volume.getCurrentDir(),
      new ReplicaFileDeleteTask(volume, blockFile, metaFile, blockName, namespaceId));
}
Delete the block file and meta file from the disk asynchronously adjust dfsUsed statistics accordingly .
33,954
/**
 * Schedules asynchronous deletion of an arbitrary file or directory on the
 * given volume (used for obsolete block files).
 *
 * @param volume volume holding the file
 * @param file   file or directory to delete
 */
void deleteAsyncFile(FSDataset.FSVolume volume, File file) {
  DataNode.LOG.info("Scheduling file " + file.toString() + " for deletion");
  execute(volume.getCurrentDir(), new FileDeleteTask(volume, file));
}
Delete a file or directory from the disk asynchronously . Used for deleting obsolete block files . Does not change dfs usage stats .
33,955
/**
 * Ensures the parent "directory" of the given path is represented in the
 * store by writing an empty marker object (parent key + FOLDER_SUFFIX).
 * No-op when the path has no parent or the parent key is empty (root).
 *
 * @param path path whose parent should be materialized
 * @throws IOException on store failure
 */
private void createParent(Path path) throws IOException {
  Path parent = path.getParent();
  if (parent != null) {
    String key = pathToKey(makeAbsolute(parent));
    // Empty key means root; no marker needed.
    if (key.length() > 0) {
      store.storeEmptyFile(key + FOLDER_SUFFIX);
    }
  }
}
Create a marker for the parent directory so that the parent of the source does not vanish .
33,956
/**
 * Registers the generic command-line options (fs, jt, conf, -D, libjars,
 * files, archives) on the given Options object.
 *
 * @param opts options collection to add to
 * @return the same Options instance, for chaining
 */
@SuppressWarnings("static-access")
private static Options buildGeneralOptions(Options opts) {
  // -fs: override the default file system / namenode.
  Option fs = OptionBuilder.withArgName("local|namenode:port").hasArg()
      .withDescription("specify a namenode").create("fs");
  // -jt: override the job tracker.
  Option jt = OptionBuilder.withArgName("local|jobtracker:port").hasArg()
      .withDescription("specify a job tracker").create("jt");
  // -conf: additional configuration resource file.
  Option oconf = OptionBuilder.withArgName("configuration file").hasArg()
      .withDescription("specify an application configuration file").create("conf");
  // -Dkey=value: set an individual configuration property.
  Option property = OptionBuilder.withArgName("property=value").hasArg()
      .withDescription("use value for given property").create('D');
  // -libjars: extra jars for the classpath.
  Option libjars = OptionBuilder.withArgName("paths").hasArg()
      .withDescription("comma separated jar files to include in the classpath.").create("libjars");
  // -files: files to ship to the cluster.
  Option files = OptionBuilder.withArgName("paths").hasArg()
      .withDescription("comma separated files to be copied to the " + "map reduce cluster").create("files");
  // -archives: archives to ship and unpack on the compute nodes.
  Option archives = OptionBuilder.withArgName("paths").hasArg()
      .withDescription("comma separated archives to be unarchived" + " on the compute machines.").create("archives");
  opts.addOption(fs);
  opts.addOption(jt);
  opts.addOption(oconf);
  opts.addOption(property);
  opts.addOption(libjars);
  opts.addOption(files);
  opts.addOption(archives);
  return opts;
}
Specify properties of each generic option
33,957
/**
 * Apply the parsed generic options to the configuration: default file
 * system, job tracker address, extra configuration resources,
 * libjars/files/archives, and individual -D properties.
 */
private void processGeneralOptions(Configuration conf, CommandLine line) {
  if (line.hasOption("fs")) {
    FileSystem.setDefaultUri(conf, line.getOptionValue("fs"));
  }
  if (line.hasOption("jt")) {
    conf.set("mapred.job.tracker", line.getOptionValue("jt"));
  }
  if (line.hasOption("conf")) {
    // Each -conf file becomes an additional configuration resource.
    String[] values = line.getOptionValues("conf");
    for (String value : values) {
      conf.addResource(new Path(value));
    }
  }
  try {
    if (line.hasOption("libjars")) {
      conf.set("tmpjars",
          validateFiles(line.getOptionValue("libjars"), conf));
      // Make the libjars visible on the client side too, on both the
      // configuration's class loader and this thread's context loader.
      URL[] libjars = getLibJars(conf);
      if (libjars != null && libjars.length > 0) {
        conf.setClassLoader(new URLClassLoader(libjars, conf.getClassLoader()));
        Thread.currentThread().setContextClassLoader(
            new URLClassLoader(libjars,
                Thread.currentThread().getContextClassLoader()));
      }
    }
    if (line.hasOption("files")) {
      conf.set("tmpfiles",
          validateFiles(line.getOptionValue("files"), conf));
    }
    if (line.hasOption("archives")) {
      conf.set("tmparchives",
          validateFiles(line.getOptionValue("archives"), conf));
    }
  } catch (IOException ioe) {
    // Validation failures are reported but do not abort option processing.
    System.err.println(StringUtils.stringifyException(ioe));
  }
  if (line.hasOption('D')) {
    String[] property = line.getOptionValues('D');
    for (String prop : property) {
      // Split on the first '=' only, so values may themselves contain '='.
      String[] keyval = prop.split("=", 2);
      if (keyval.length == 2) {
        conf.set(keyval[0], keyval[1]);
      }
    }
  }
  // Record that the generic options parser processed this configuration.
  conf.setBoolean("mapred.used.genericoptionsparser", true);
}
Modify configuration according user - specified generic options
33,958
/**
 * Parse the comma-separated "tmpjars" configuration entry into URLs of
 * local jar files.
 *
 * @return the jar URLs, or null if no libjars were configured
 */
public static URL[] getLibJars(Configuration conf) throws IOException {
  String jars = conf.get("tmpjars");
  if (jars == null) {
    return null;
  }
  String[] names = jars.split(",");
  URL[] urls = new URL[names.length];
  for (int i = 0; i < names.length; i++) {
    urls[i] = FileSystem.getLocal(conf)
        .pathToFile(new Path(names[i])).toURI().toURL();
  }
  return urls;
}
If libjars are set in the conf parse the libjars .
33,959
/**
 * Parse the user-specified arguments, apply the generic options to the
 * configuration, and return the remaining (non-generic) arguments. On a
 * parse failure the usage is printed and the original args are returned.
 */
private String[] parseGeneralOptions(Options opts, Configuration conf,
    String[] args) {
  opts = buildGeneralOptions(opts);
  CommandLineParser parser = new GnuParser();
  try {
    commandLine = parser.parse(opts, args, true);
    processGeneralOptions(conf, commandLine);
    return commandLine.getArgs();
  } catch (ParseException e) {
    LOG.warn("options parsing failed: " + e.getMessage());
    new HelpFormatter().printHelp("general options are: ", opts);
  }
  return args;
}
Parse the user - specified options get the generic options and modify configuration accordingly
33,960
/** Print the usage message for the generic command-line options. */
public static void printGenericCommandUsage(PrintStream out) {
  // One entry per output line; printed in order. Text is unchanged.
  String[] lines = {
    "Generic options supported are",
    "-conf <configuration file> specify an application configuration file",
    "-D <property=value> use value for given property",
    "-fs <local|namenode:port> specify a namenode",
    "-jt <local|jobtracker:port> specify a job tracker",
    "-files <comma separated list of files> "
        + "specify comma separated files to be copied to the map reduce cluster",
    "-libjars <comma separated list of jars> "
        + "specify comma separated jar files to include in the classpath.",
    "-archives <comma separated list of archives> "
        + "specify comma separated archives to be unarchived"
        + " on the compute machines.\n",
    "The general command line syntax is",
    "bin/hadoop command [genericOptions] [commandOptions]\n"
  };
  for (String line : lines) {
    out.println(line);
  }
}
Print the usage message for generic command - line options supported .
33,961
/**
 * Register a new node with the fault manager, creating empty fault
 * statistics for each of its resource types.
 */
public void addNode(String name, Set<ResourceType> resourceTypes) {
  List<FaultStatsForType> stats =
      new ArrayList<FaultStatsForType>(resourceTypes.size());
  for (ResourceType resourceType : resourceTypes) {
    stats.add(new FaultStatsForType(resourceType));
  }
  nodeToFaultStats.put(name, stats);
}
Notify the fault manager of a new node .
33,962
/**
 * Provide the fault manager with new feedback about a node. Updates the
 * per-resource-type fault statistics under the stats list's lock and, if
 * anything changed, re-evaluates whether the node should be blacklisted.
 */
public void nodeFeedback(String nodeName, List<ResourceType> resourceTypes,
    NodeUsageReport usageReport) {
  List<FaultStatsForType> faultStats = nodeToFaultStats.get(nodeName);
  if (faultStats == null) {
    // Node was removed between the report being generated and now.
    LOG.info("Received node feedback for deleted node " + nodeName);
    return;
  }
  boolean statsModified = false;
  synchronized (faultStats) {
    if (tooManyFailedConnectionsInSession(usageReport)) {
      for (FaultStatsForType stat : faultStats) {
        if (resourceTypes.contains(stat.type)) {
          stat.numSessionsWithFailedConnections++;
          statsModified = true;
        }
      }
    }
    if (tooManyFailuresInSession(usageReport)) {
      for (FaultStatsForType stat : faultStats) {
        if (resourceTypes.contains(stat.type)) {
          stat.numSessionsWithTooManyFailures++;
          statsModified = true;
        }
      }
    }
  }
  if (statsModified) {
    // Threshold check happens outside the lock; it re-reads the stats.
    blacklistIfNeeded(nodeName, faultStats);
  }
}
Provide the fault manager with new feedback about a node .
33,963
/** Check whether the given resource type is blacklisted on the given node. */
public boolean isBlacklisted(String nodeName, ResourceType type) {
  List<ResourceType> types = blacklistedNodes.get(nodeName);
  if (types == null) {
    return false;  // node has no blacklisted resources at all
  }
  synchronized (types) {
    return types.contains(type);
  }
}
Check if a resource on a node is blacklisted .
33,964
/**
 * Get a snapshot of the names of currently blacklisted nodes.
 *
 * @return a new list containing the blacklisted node names
 */
public List<String> getBlacklistedNodes() {
  // Copy-construct the snapshot instead of looping element-by-element; the
  // copy constructor iterates the key set exactly as the original loop did.
  return new ArrayList<String>(blacklistedNodes.keySet());
}
Return the list of blacklisted nodes .
33,965
/**
 * Check each resource type's fault statistics for this node and blacklist
 * any type that has crossed a failure threshold and is not already
 * blacklisted.
 */
private void blacklistIfNeeded(String nodeName,
    List<FaultStatsForType> faultStats) {
  for (FaultStatsForType stat : faultStats) {
    if (isBlacklisted(nodeName, stat.type)) {
      continue;  // already blacklisted for this resource type
    }
    if (tooManyFailuresOnNode(stat) || tooManyConnectionFailuresOnNode(stat)) {
      // Notify the node manager first, then record the blacklist locally.
      nm.blacklistNode(nodeName, stat.type);
      blacklist(nodeName, stat.type);
    }
  }
}
Checks if a node needs to be blacklisted and blacklists it .
33,966
/**
 * Record that a resource type on a node is blacklisted.
 * NOTE(review): the get/put on blacklistedNodes is not atomic — two threads
 * blacklisting the same node concurrently could each create a list and one
 * would be lost. Confirm callers are serialized or the map is protected
 * elsewhere.
 */
private void blacklist(String nodeName, ResourceType type) {
  List<ResourceType> blacklistedResourceTypes = blacklistedNodes.get(nodeName);
  if (blacklistedResourceTypes == null) {
    blacklistedResourceTypes = new ArrayList<ResourceType>();
    blacklistedNodes.put(nodeName, blacklistedResourceTypes);
  }
  synchronized (blacklistedResourceTypes) {
    // Avoid duplicate entries for the same resource type.
    if (!blacklistedResourceTypes.contains(type)) {
      blacklistedResourceTypes.add(type);
    }
  }
}
Blacklists a resource on a node .
33,967
/**
 * Check whether the current user may access the path. Traverse (execute)
 * permission along the ancestor chain is always checked; the remaining
 * checks are applied only when the corresponding FsAction is non-null.
 *
 * @param doCheckOwner   also require the user to own the final inode
 * @param ancestorAccess access required on the deepest existing ancestor
 * @param parentAccess   access required on the parent of the last inode
 * @param access         access required on the last inode itself
 * @param subAccess      access required on every inode in the subtree
 * @throws AccessControlException if any required permission is missing
 */
void checkPermission(String path, INode[] inodes, boolean doCheckOwner,
    FsAction ancestorAccess, FsAction parentAccess, FsAction access,
    FsAction subAccess) throws AccessControlException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("ACCESS CHECK: " + this
        + ", doCheckOwner=" + doCheckOwner
        + ", ancestorAccess=" + ancestorAccess
        + ", parentAccess=" + parentAccess
        + ", access=" + access
        + ", subAccess=" + subAccess);
  }
  // Find the deepest non-null ancestor: trailing components may be null
  // when the path does not fully exist yet.
  int ancestorIndex = inodes.length - 2;
  for (; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--);
  checkTraverse(inodes, ancestorIndex);
  if (ancestorAccess != null && inodes.length > 1) {
    check(inodes, ancestorIndex, ancestorAccess);
  }
  if (parentAccess != null && inodes.length > 1) {
    check(inodes, inodes.length - 2, parentAccess);
  }
  if (access != null) {
    check(inodes[inodes.length - 1], access);
  }
  if (subAccess != null) {
    checkSubAccess(inodes[inodes.length - 1], subAccess);
  }
  if (doCheckOwner) {
    checkOwner(inodes[inodes.length - 1]);
  }
}
Check whether current user have permissions to access the path . Traverse is always checked .
33,968
/** Convert a collection of resource types to their String representations. */
public static Collection<String> convertResourceTypesToStrings(
    Collection<ResourceType> resourceTypes) {
  List<String> result = new ArrayList<String>(resourceTypes.size());
  for (ResourceType type : resourceTypes) {
    result.add(type.toString());
  }
  return result;
}
Convert a collection of resource types to a collection of String objects
33,969
/**
 * Validate the request attribute names, allowing only the known parameter
 * names.
 *
 * @return null if all names are valid, otherwise an error message naming
 *         the first illegal parameter
 */
public static String validateAttributeNames(
    Enumeration<String> attributeNames) {
  while (attributeNames.hasMoreElements()) {
    String attribute = attributeNames.nextElement();
    if (!attribute.equals("users") &&
        !attribute.equals("poolGroups") &&
        !attribute.equals("poolInfos") &&
        !attribute.equals("toKillSessionId") &&
        !attribute.equals("killSessionsToken")) {
      // Fixed the previously malformed message: mismatched quotes and a
      // missing space before "parameters allowed".
      return "Illegal parameter " + attribute + ", only 'users', " +
          "'poolGroups', 'poolInfos', 'toKillSessionId' and " +
          "'killSessionsToken' parameters allowed.";
    }
  }
  return null;
}
Check the attribute names
33,970
/** Check whether the kill-sessions token matches one of the valid tokens. */
public static boolean isValidKillSessionsToken(String token) {
  if (token == null || token.isEmpty()) {
    return false;
  }
  for (String valid : VALID_TOKENS) {
    if (valid.equals(token)) {
      return true;
    }
  }
  return false;
}
Check if the kill sessions token is a valid one
33,971
/**
 * Convert the raw user/poolGroup/poolInfo request parameters into filter
 * sets plus an HTML description of the active filters. A literal "null"
 * string is treated the same as an absent parameter.
 */
public static JspParameterFilters getJspParameterFilters(String userFilter,
    String poolGroupFilter, String poolInfoFilter) {
  JspParameterFilters filters = new JspParameterFilters();
  if (userFilter != null && !userFilter.equals("null")) {
    filters.getUserFilterSet().addAll(Arrays.asList(userFilter.split(",")));
    filters.getHtmlOutput().append(("<b>users:</b> " + userFilter + "<br>"));
  }
  if (poolGroupFilter != null && !poolGroupFilter.equals("null")) {
    filters.getPoolGroupFilterSet().addAll(
        Arrays.asList(poolGroupFilter.split(",")));
    filters.getHtmlOutput().append(
        "<b>poolGroups:</b> " + poolGroupFilter + "<br>");
  }
  if (poolInfoFilter != null && !poolInfoFilter.equals("null")) {
    filters.getHtmlOutput().append(
        "<b>poolInfos:</b> " + poolInfoFilter + "<br>");
    for (String poolInfoString : poolInfoFilter.split(",")) {
      // Pool infos are given as "group.pool"; malformed entries are skipped.
      String[] poolInfoStrings = poolInfoString.split("[.]");
      if (poolInfoStrings.length == 2) {
        filters.getPoolInfoFilterSet().add(
            new PoolInfo(poolInfoStrings[0], poolInfoStrings[1]));
      }
    }
  }
  return filters;
}
Convert the parameters to filters and html output
33,972
// Copy as much buffered user data as fits into the uncompressed direct
// buffer, advancing the user-buffer offset and shrinking its remaining
// length accordingly.
synchronized void setInputFromSavedData() {
  int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
  ((ByteBuffer) uncompressedDirectBuf).put(userBuf, userBufOff, len);
  userBufLen -= len;
  userBufOff += len;
  // Record how much valid data the direct buffer now holds.
  uncompressedDirectBufLen = uncompressedDirectBuf.position();
}
copy enough data from userBuf to uncompressedDirectBuf
33,973
/**
 * Close the stream and discard the temporary file without committing it on
 * top of the destination. Use this when writing has failed.
 */
public void abort() {
  try {
    super.close();
  } catch (IOException ioe) {
    LOG.warn("Unable to abort file " + tmpFile, ioe);
  }
  boolean deleted = tmpFile.delete();
  if (!deleted) {
    LOG.warn("Unable to delete tmp file during abort " + tmpFile);
  }
}
Close the atomic file but do not commit the temporary file on top of the destination . This should be used if there is a failure in writing .
33,974
/**
 * Start a SimulatorJobTracker with the given configuration, simulation
 * start time and engine, and start the JobEndNotifier thread on success.
 * Construction failures are logged, not rethrown, and yield a null return.
 */
public static SimulatorJobTracker startTracker(JobConf conf, long startTime,
    SimulatorEngine engine) throws IOException {
  SimulatorJobTracker result = null;
  try {
    SimulatorClock simClock = new SimulatorClock(startTime);
    result = new SimulatorJobTracker(conf, simClock, engine);
    // In the simulator, the tracker acts as its own TaskTrackerManager.
    result.taskScheduler.setTaskTrackerManager(result);
  } catch (IOException e) {
    LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e));
  } catch (InterruptedException e) {
    LOG.warn("Error starting tracker: " + StringUtils.stringifyException(e));
  }
  if (result != null) {
    JobEndNotifier.startNotifier();
  }
  return result;
}
Starts the JobTracker with given configuration and a given time . It also starts the JobNotifier thread .
33,975
/**
 * Start the SimulatorJobTracker after creating its own SimulatorEngine.
 * Primarily a debugging convenience.
 */
public static SimulatorJobTracker startTracker(JobConf conf, long startTime)
    throws IOException, InterruptedException {
  SimulatorEngine engine = new SimulatorEngine();
  return startTracker(conf, startTime, engine);
}
Start the SimulatorJobTracker with given configuration after creating its own SimulatorEngine . Pretty much used for debugging only .
33,976
/** Return the static simulator clock, asserting it agrees with the engine. */
static Clock getClock() {
  long engineTime = engine.getCurrentTime();
  long trackerTime = clock.getTime();
  assert engineTime == trackerTime
      : " Engine time = " + engineTime + " JobTracker time = " + trackerTime;
  return clock;
}
Returns the simulatorClock , which is a static object in SimulatorJobTracker .
33,977
/**
 * Add a finalized job's id to the cleanup queue and trim the queue so at
 * most JOBS_IN_MUMAK_MEMORY job ids remain in memory.
 */
private void cleanupJob(JobInProgress job) {
  cleanupQueue.add(job.getJobID());
  // Drop the oldest entries. The original assigned poll() to an unused
  // local (dead store); the return value is deliberately discarded.
  while (cleanupQueue.size() > JOBS_IN_MUMAK_MEMORY) {
    cleanupQueue.poll();
  }
}
The cleanupJob method maintains the queue cleanupQueue . When a job is finalized it is added to the cleanupQueue . Jobs are removed from the cleanupQueue so that its size is maintained to be less than that specified by JOBS_IN_MUMAK_MEMORY .
33,978
/**
 * Validate that simulation time never moves backwards, then advance the
 * simulator clock to the new time.
 *
 * @throws IllegalArgumentException if the new time precedes the current one
 */
private void validateAndSetClock(long newSimulationTime) {
  long now = clock.getTime();
  if (newSimulationTime < now) {
    throw new IllegalArgumentException("Time has gone backwards! "
        + "newSimulationTime: " + newSimulationTime
        + " while currentTime: " + now);
  }
  assert newSimulationTime == engine.getCurrentTime()
      : " newTime =" + newSimulationTime
        + " engineTime = " + engine.getCurrentTime();
  clock.setTime(newSimulationTime);
}
Utility to validate the current simulation time
33,979
/**
 * Inform task trackers when reduce attempts may move from the SHUFFLE phase
 * to the REDUCE phase. For every running, not-about-to-be-killed reduce
 * attempt on this tracker still in SHUFFLE, if its job has finished all
 * desired maps, an AllMapsCompletedTaskAction is queued for that attempt.
 * Jobs no longer known are scheduled for cleanup on the tracker instead.
 */
private List<TaskTrackerAction> getMapCompletionTasks(
    TaskTrackerStatus status, List<TaskTrackerAction> tasksToKill) {
  boolean loggingEnabled = LOG.isDebugEnabled();
  // Attempts being killed must not receive map-completion actions.
  Set<TaskAttemptID> killedTasks = new HashSet<TaskAttemptID>();
  if (tasksToKill != null) {
    for (TaskTrackerAction taskToKill : tasksToKill) {
      killedTasks.add(((KillTaskAction) taskToKill).getTaskID());
    }
  }
  String trackerName = status.getTrackerName();
  List<TaskTrackerAction> actions = new ArrayList<TaskTrackerAction>();
  for (TaskStatus report : status.getTaskReports()) {
    TaskAttemptID taskAttemptId = report.getTaskID();
    SimulatorJobInProgress job = getSimulatorJob(taskAttemptId.getJobID());
    if (job == null) {
      // Unknown job: remember that this tracker must clean it up later.
      Set<JobID> jobsToCleanup = trackerToJobsToCleanup.get(trackerName);
      if (jobsToCleanup == null) {
        jobsToCleanup = new HashSet<JobID>();
        trackerToJobsToCleanup.put(trackerName, jobsToCleanup);
      }
      jobsToCleanup.add(taskAttemptId.getJobID());
      continue;
    }
    JobStatus jobStatus = job.getStatus();
    TaskInProgress tip = taskidToTIPMap.get(taskAttemptId);
    // Only running, non-killed reduce attempts in SHUFFLE are candidates.
    if (jobStatus.getRunState() == JobStatus.RUNNING &&
        tip.isRunningTask(taskAttemptId) &&
        !killedTasks.contains(taskAttemptId) &&
        !report.getIsMap() &&
        report.getPhase() == TaskStatus.Phase.SHUFFLE) {
      if (loggingEnabled) {
        LOG.debug("Need map-completion information for REDUCEattempt "
            + taskAttemptId + " in tracker " + trackerName);
        LOG.debug("getMapCompletion: job=" + job.getJobID()
            + " pendingMaps=" + job.pendingMaps());
      }
      boolean canSendMapCompletion = false;
      // All desired maps must be finished before shuffle can complete.
      canSendMapCompletion = (job.finishedMaps() == job.desiredMaps());
      if (canSendMapCompletion) {
        if (loggingEnabled) {
          LOG.debug("Adding MapCompletion for taskAttempt " + taskAttemptId
              + " in tracker " + trackerName);
          LOG.debug("FinishedMaps for job:" + job.getJobID() + " is = "
              + job.finishedMaps() + "/" + job.desiredMaps());
          LOG.debug("AllMapsCompleted for task " + taskAttemptId
              + " time=" + getClock().getTime());
        }
        actions.add(new AllMapsCompletedTaskAction(taskAttemptId));
      }
    }
  }
  return actions;
}
The getMapCompletionTasks method is intended to inform taskTrackers when to change the status of reduce tasks from shuffle to reduce . For all reduce tasks in this TaskTracker that are in the shuffle phase getMapCompletionTasks finds the number of finished maps for this job from the jobInProgress object . If this number equals the number of desired maps for this job then it adds an AllMapsCompletedTaskAction for this reduce task - attempt .
33,980
/**
 * Open an input stream over the next ledger entry, or return null when the
 * reader has caught up with the writer and no entry is available yet.
 *
 * @throws IOException on unrecoverable BookKeeper errors or interruption
 */
private InputStream nextEntryStream() throws IOException {
  long nextLedgerEntryId = currentStreamState.getNextLedgerEntryId();
  if (nextLedgerEntryId > maxLedgerEntryIdSeen) {
    // Refresh our view of the ledger tail before giving up.
    updateMaxLedgerEntryIdSeen();
    if (nextLedgerEntryId > maxLedgerEntryIdSeen) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Requesting to ledger entryId " + nextLedgerEntryId
            + ", but " + " maxLedgerEntryIdSeen is " + maxLedgerEntryIdSeen
            + ", ledger length is " + ledger.getLength());
      }
      return null;
    }
  }
  try {
    // Read exactly one entry (the range is [id, id]).
    Enumeration<LedgerEntry> entries =
        ledger.readEntries(nextLedgerEntryId, nextLedgerEntryId);
    currentStreamState.incrementNextLedgerEntryId();
    if (entries.hasMoreElements()) {
      LedgerEntry entry = entries.nextElement();
      if (entries.hasMoreElements()) {
        // We asked for a single entry; anything more indicates a bug.
        throw new IllegalStateException("More than one entry retrieved!");
      }
      currentStreamState.setOffsetInEntry(0);
      return entry.getEntryInputStream();
    }
  } catch (BKException e) {
    throw new IOException("Unrecoverable BookKeeper error reading entry "
        + nextLedgerEntryId, e);
  } catch (InterruptedException e) {
    // Preserve the interrupt status for callers further up the stack.
    Thread.currentThread().interrupt();
    throw new IOException("Interrupted reading BookKeeper entry "
        + nextLedgerEntryId, e);
  }
  return null;
}
Open a new input stream for the next ledger entry
33,981
/**
 * Reposition the reader to the given byte offset in the ledger. Position 0
 * rewinds to the first entry; other positions are either restored from the
 * saved stream state (when it matches the target) or reached by rewinding
 * and brute-force skipping forward.
 *
 * @throws IOException if skipping fails
 * @throws IllegalArgumentException for targets past Integer.MAX_VALUE or
 *         inconsistent with the saved state
 */
public void position(long position) throws IOException {
  if (position == 0) {
    // Rewind to the very first ledger entry.
    currentStreamState.setNextLedgerEntryId(firstLedgerEntryId);
    currentStreamState.setOffsetInEntry(0);
    entryStream = null;
  } else if (savedStreamState == null
      || position != savedStreamState.getReaderPosition()) {
    // No usable saved state: rewind and skip forward byte by byte.
    if (position > Integer.MAX_VALUE) {
      throw new IllegalArgumentException("Asked to position to " + position
          + ", but can only \"brute-force\" skip up" + Integer.MAX_VALUE);
    }
    position(0);
    skip(position, (int) position);
  } else {
    // Restore from the saved state associated with this position.
    int bytesToSkip = 0;
    if (savedStreamState.getOffsetInLedger() > position) {
      // The saved state is past the target within the same entry; skip from
      // the entry's start to the target offset.
      long entryStartPosition = savedStreamState.getOffsetInLedger()
          - savedStreamState.getOffsetInEntry();
      bytesToSkip = (int) (position - entryStartPosition);
    } else if (savedStreamState.getOffsetInLedger() < position) {
      throw new IllegalArgumentException("Saved offset in ledger ("
          + savedStreamState.getOffsetInLedger() + ") < position("
          + position + ")");
    }
    // Step back one entry id (unless already at the first entry) so the
    // entry containing the target is re-read.
    long nextLedgerEntryId =
        savedStreamState.getNextLedgerEntryId() == firstLedgerEntryId
            ? firstLedgerEntryId
            : (savedStreamState.getNextLedgerEntryId() - 1);
    currentStreamState.setNextLedgerEntryId(nextLedgerEntryId);
    if (bytesToSkip > 0) {
      entryStream = null;
      skip(position, bytesToSkip);
    } else {
      if (currentStreamState.getNextLedgerEntryId() > 0) {
        currentStreamState.setNextLedgerEntryId(
            currentStreamState.getNextLedgerEntryId() - 1);
      }
      entryStream = nextEntryStream();
    }
  }
  currentStreamState.setOffsetInLedger(position);
}
Go back to the specified reader position by resetting the reader to a saved state associated with that position .
33,982
/**
 * Parse the arguments after startIndex ("-restore", "-repl N" and a parity
 * root path), validate them, match the root against a known codec's parity
 * prefix, and run a ParityVerifier over the root, printing to stdout.
 */
private void verifyParity(String[] args, int startIndex) {
  boolean restoreReplication = false;
  int repl = -1;
  Path root = null;
  for (int i = startIndex; i < args.length; i++) {
    String arg = args[i];
    if (arg.equals("-restore")) {
      restoreReplication = true;
    } else if (arg.equals("-repl")) {
      i++;  // the replication value follows the flag
      if (i >= args.length) {
        throw new IllegalArgumentException("Missing repl after -r option");
      }
      repl = Integer.parseInt(args[i]);
    } else {
      root = new Path(arg);
    }
  }
  if (root == null) {
    throw new IllegalArgumentException("Too few arguments");
  }
  if (repl == -1) {
    throw new IllegalArgumentException("Need to specify -r option");
  }
  if (repl < 1 || repl > 3) {
    throw new IllegalArgumentException("repl could only in the range [1, 3]");
  }
  // The root must live under some codec's parity directory.
  Codec matched = null;
  String rootPath = root.toUri().getPath();
  if (!rootPath.endsWith(Path.SEPARATOR)) {
    rootPath += Path.SEPARATOR;
  }
  for (Codec code : Codec.getCodecs()) {
    if (rootPath.startsWith(code.getParityPrefix())) {
      matched = code;
      break;
    }
  }
  if (matched == null) {
    throw new IllegalArgumentException("root needs to starts with parity dirs");
  }
  try {
    FileSystem fs = root.getFileSystem(conf);
    conf.set(FileSystem.FS_DEFAULT_NAME_KEY, fs.getUri().toString());
    ParityVerifier pv =
        new ParityVerifier(conf, restoreReplication, repl, matched);
    pv.verifyParities(root, System.out);
  } catch (IOException ex) {
    System.err.println("findMissingParityFiles: " + ex);
  }
}
search each parity and verify the source files have the expected replication
33,983
/**
 * Recover the specified corrupted paths through the raid file system, which
 * reconstructs missing data from parity. Arguments come in (path, offset)
 * pairs starting at startindex; each recovered copy is written to a unique
 * temp path which is returned.
 */
public Path[] recover(String cmd, String argv[], int startindex)
    throws IOException {
  Path[] paths = new Path[(argv.length - startindex) / 2];
  int j = 0;
  for (int i = startindex; i < argv.length; i = i + 2) {
    String path = argv[i];
    long corruptOffset = Long.parseLong(argv[i + 1]);
    LOG.info("RaidShell recoverFile for " + path
        + " corruptOffset " + corruptOffset);
    // Include the loop index in the name: currentTimeMillis() alone can
    // collide when several files are recovered in the same millisecond,
    // silently overwriting an earlier recovery.
    Path recovered =
        new Path("/tmp/recovered." + System.currentTimeMillis() + "." + j);
    FileSystem fs = recovered.getFileSystem(conf);
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Route reads through the raid file system so corrupt blocks are
    // reconstructed from parity on the fly.
    Configuration raidConf = new Configuration(conf);
    raidConf.set("fs.hdfs.impl",
        "org.apache.hadoop.hdfs.DistributedRaidFileSystem");
    raidConf.set("fs.raid.underlyingfs.impl",
        "org.apache.hadoop.hdfs.DistributedFileSystem");
    raidConf.setBoolean("fs.hdfs.impl.disable.cache", true);
    java.net.URI dfsUri = dfs.getUri();
    FileSystem raidFs = FileSystem.get(dfsUri, raidConf);
    FileUtil.copy(raidFs, new Path(path), fs, recovered, false, conf);
    paths[j] = recovered;
    LOG.info("Raidshell created recovery file " + paths[j]);
    j++;
  }
  return paths;
}
Recovers the specified path from the parity file
33,984
/**
 * Check whether a file has more than the allowable number of corrupt
 * blocks and must therefore be considered corrupt. Delegates to the
 * detailed overload using this shell's configuration and counters.
 */
protected boolean isFileCorrupt(final DistributedFileSystem dfs,
    final FileStatus fileStat) throws IOException {
  return isFileCorrupt(dfs, fileStat, false, conf,
      this.numNonRaidedMissingBlks, this.numStrpMissingBlksMap);
}
checks whether a file has more than the allowable number of corrupt blocks and must therefore be considered corrupt
33,985
/**
 * Look up a registered method by its fixed-length id.
 *
 * @return the method, or null if the id has the wrong length or is unknown
 */
public static Method tryGetMethod(String id) {
  return id.length() == NAME_LEN ? idToMethod.get(id) : null;
}
Tries to get a method given the id . Returns null if no such method is registered .
33,986
/**
 * Set the combiner class for the job.
 *
 * @throws IllegalStateException if the job is no longer in the DEFINE state
 */
public void setCombinerClass(Class<? extends Reducer> cls)
    throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setClass(COMBINE_CLASS_ATTR, cls, Reducer.class);
}
Set the combiner class for the job .
33,987
/**
 * Set the key class for the map output data, which may differ from the
 * final output key class.
 *
 * @throws IllegalStateException if the job is no longer in the DEFINE state
 */
public void setMapOutputKeyClass(Class<?> theClass)
    throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setMapOutputKeyClass(theClass);
}
Set the key class for the map output data . This allows the user to specify the map output key class to be different than the final output key class .
33,988
/**
 * Set the value class for the map output data, which may differ from the
 * final output value class.
 *
 * @throws IllegalStateException if the job is no longer in the DEFINE state
 */
public void setMapOutputValueClass(Class<?> theClass)
    throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setMapOutputValueClass(theClass);
}
Set the value class for the map output data . This allows the user to specify the map output value class to be different than the final output value class .
33,989
/**
 * Set the key class for the job output data.
 *
 * @throws IllegalStateException if the job is no longer in the DEFINE state
 */
public void setOutputKeyClass(Class<?> theClass)
    throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setOutputKeyClass(theClass);
}
Set the key class for the job output data .
33,990
/**
 * Set the value class for job outputs.
 *
 * @throws IllegalStateException if the job is no longer in the DEFINE state
 */
public void setOutputValueClass(Class<?> theClass)
    throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setOutputValueClass(theClass);
}
Set the value class for job outputs .
33,991
/**
 * Set the user-specified job name.
 *
 * @throws IllegalStateException if the job is no longer in the DEFINE state
 */
public void setJobName(String name) throws IllegalStateException {
  ensureState(JobState.DEFINE);
  conf.setJobName(name);
}
Set the user - specified job name .
33,992
/**
 * Kill the indicated task attempt (without marking it as failed).
 *
 * @throws IOException if the job is not running or the kill fails
 */
public void killTask(TaskAttemptID taskId) throws IOException {
  ensureState(JobState.RUNNING);
  org.apache.hadoop.mapred.TaskAttemptID oldId =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  info.killTask(oldId, false);
}
Kill indicated task attempt .
33,993
/**
 * Get the counters for this job.
 *
 * @return the counters, or null if none are available
 */
public Counters getCounters() throws IOException {
  ensureState(JobState.RUNNING);
  org.apache.hadoop.mapred.Counters raw = info.getCounters();
  return raw == null ? null : new Counters(raw);
}
Gets the counters for this job .
33,994
/**
 * Default the job to the new mapreduce APIs unless they are explicitly set
 * or the old mapper/reducer attributes are used. For whichever API is
 * chosen, verify that no conflicting attribute of the other API is set.
 *
 * @throws IOException if conflicting old/new API attributes are configured
 */
private void setUseNewAPI() throws IOException {
  int numReduces = conf.getNumReduceTasks();
  String oldMapperClass = "mapred.mapper.class";
  String oldReduceClass = "mapred.reducer.class";
  // Use the new map API unless an old-style mapper class was configured.
  conf.setBooleanIfUnset("mapred.mapper.new-api",
      conf.get(oldMapperClass) == null);
  if (conf.getUseNewMapper()) {
    String mode = "new map API";
    ensureNotSet("mapred.input.format.class", mode);
    ensureNotSet(oldMapperClass, mode);
    if (numReduces != 0) {
      ensureNotSet("mapred.partitioner.class", mode);
    } else {
      // Map-only job: the output format conflict matters instead.
      ensureNotSet("mapred.output.format.class", mode);
    }
  } else {
    String mode = "map compatability";
    ensureNotSet(JobContext.INPUT_FORMAT_CLASS_ATTR, mode);
    ensureNotSet(JobContext.MAP_CLASS_ATTR, mode);
    if (numReduces != 0) {
      ensureNotSet(JobContext.PARTITIONER_CLASS_ATTR, mode);
    } else {
      ensureNotSet(JobContext.OUTPUT_FORMAT_CLASS_ATTR, mode);
    }
  }
  if (numReduces != 0) {
    // Same logic, applied to the reduce side.
    conf.setBooleanIfUnset("mapred.reducer.new-api",
        conf.get(oldReduceClass) == null);
    if (conf.getUseNewReducer()) {
      String mode = "new reduce API";
      ensureNotSet("mapred.output.format.class", mode);
      ensureNotSet(oldReduceClass, mode);
    } else {
      String mode = "reduce compatability";
      ensureNotSet(JobContext.OUTPUT_FORMAT_CLASS_ATTR, mode);
      ensureNotSet(JobContext.REDUCE_CLASS_ATTR, mode);
    }
  }
}
Default to the new APIs unless they are explicitly set or the old mapper or reduce attributes are used .
33,995
/**
 * Submit the job to the cluster and return immediately. Chooses between
 * the old and new APIs, then transitions the job to the RUNNING state.
 */
public void submit() throws IOException, InterruptedException,
    ClassNotFoundException {
  ensureState(JobState.DEFINE);
  setUseNewAPI();
  info = jobClient.submitJobInternal(conf);
  state = JobState.RUNNING;
}
Submit the job to the cluster and return immediately .
33,996
/**
 * Submit the job to the cluster (if not already submitted) and wait for it
 * to finish.
 *
 * @param verbose print job progress to the console while waiting
 * @return true if the job completed successfully
 */
public boolean waitForCompletion(boolean verbose)
    throws IOException, InterruptedException, ClassNotFoundException {
  if (state == JobState.DEFINE) {
    submit();
  }
  if (verbose) {
    // Print progress while blocking until the job finishes.
    jobClient.monitorAndPrintJob(conf, info);
  } else {
    info.waitForCompletion();
  }
  return isSuccessful();
}
Submit the job to the cluster and wait for it to finish .
33,997
/**
 * Apply the user-supplied -jobconf key=value properties to the job conf.
 * Properties that must be set early (fs.default.name and
 * stream.shipped.hadoopstreaming) are applied only when doEarlyProps is
 * true; all others only when it is false.
 */
protected void setUserJobConfProps(boolean doEarlyProps) {
  for (Iterator it = userJobConfProps_.keySet().iterator(); it.hasNext();) {
    String key = (String) it.next();
    String val = (String) userJobConfProps_.get(key);
    boolean earlyName = key.equals("fs.default.name")
        || key.equals("stream.shipped.hadoopstreaming");
    if (doEarlyProps == earlyName) {
      msg("xxxJobConf: set(" + key + ", " + val + ") early=" + doEarlyProps);
      jobConf_.set(key, val);
    }
  }
}
This method sets the user jobconf variable specified by user using - jobconf key = value
33,998
/**
 * Collect per-journal statistics (committed/segment/written txids, lag and
 * writer epoch) for display on the web UI, keyed by journal id. Journals
 * that fail to report are logged and omitted leaving a partial entry.
 */
public static Map<String, Map<String, String>> getJournalStats(
    Collection<Journal> journals) {
  Map<String, Map<String, String>> stats =
      new HashMap<String, Map<String, String>>();
  for (Journal j : journals) {
    try {
      Map<String, String> stat = new HashMap<String, String>();
      stats.put(j.getJournalId(), stat);
      stat.put("Txid committed", Long.toString(j.getCommittedTxnId()));
      stat.put("Txid segment", Long.toString(j.getCurrentSegmentTxId()));
      stat.put("Txid written", Long.toString(j.getHighestWrittenTxId()));
      stat.put("Current lag", Long.toString(j.getCurrentLagTxns()));
      stat.put("Writer epoch", Long.toString(j.getLastWriterEpoch()));
    } catch (IOException e) {
      // Fixed typo in the log message ("collectng" -> "collecting").
      LOG.error("Error when collecting stats", e);
    }
  }
  return stats;
}
Get journal stats for webui .
33,999
/** Write the given string as the body of an HTTP servlet response. */
static void sendResponse(String output, HttpServletResponse response)
    throws IOException {
  PrintWriter writer = response.getWriter();
  try {
    writer.write(output);
  } finally {
    // Equivalent to the original null-guarded finally: if getWriter()
    // throws, this try block is never entered and nothing is closed.
    writer.close();
  }
}
Send string output when serving http request .