idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,700
/**
 * Initializes the simulation: configures and starts the simulated
 * JobTracker, builds the cluster topology, creates the job client from the
 * trace, and starts the task trackers.
 *
 * @throws IllegalArgumentException if "mumak.terminate.time" is not positive
 */
@SuppressWarnings("deprecation")
void init() throws InterruptedException, IOException {
  long now = System.currentTimeMillis();

  JobConf jobConf = new JobConf(getConf());
  jobConf.setClass("topology.node.switch.mapping.impl",
      StaticMapping.class, DNSToSwitchMapping.class);
  jobConf.set("fs.default.name", "file:///");
  jobConf.set("mapred.job.tracker", "localhost:8012");
  jobConf.setInt("mapred.jobtracker.job.history.block.size", 512);
  jobConf.setInt("mapred.jobtracker.job.history.buffer.size", 512);
  jobConf.setLong("mapred.tasktracker.expiry.interval", 5000);
  jobConf.setInt("mapred.reduce.copy.backoff", 4);
  jobConf.setLong("mapred.job.reuse.jvm.num.tasks", -1);
  jobConf.setUser("mumak");
  jobConf.set("mapred.system.dir",
      jobConf.get("hadoop.log.dir", "/tmp/hadoop-" + jobConf.getUser())
          + "/mapred/system");
  jobConf.set("mapred.jobtracker.taskScheduler",
      JobQueueTaskScheduler.class.getName());

  FileSystem lfs = FileSystem.getLocal(getConf());
  Path logPath =
      new Path(System.getProperty("hadoop.log.dir")).makeQualified(lfs);
  // NOTE(review): this overwrites the "mapred.system.dir" value assigned a
  // few lines above — confirm the double assignment is intentional.
  jobConf.set("mapred.system.dir", logPath.toString());
  jobConf.set("hadoop.job.history.location",
      (new Path(logPath, "history").toString()));

  jt = SimulatorJobTracker.startTracker(jobConf, now, this);
  jt.offerService();

  int maxMaps = getConf().getInt("mapred.tasktracker.map.tasks.maximum",
      DEFAULT_MAP_SLOTS_PER_NODE);
  int maxReduces = getConf().getInt("mapred.tasktracker.reduce.tasks.maximum",
      DEFAULT_REDUCE_SLOTS_PER_NODE);
  MachineNode defaultNode = new MachineNode.Builder("default", 2)
      .setMapSlots(maxMaps).setReduceSlots(maxReduces).build();
  ZombieCluster cluster =
      new ZombieCluster(new Path(topologyFile), defaultNode, jobConf);

  // Jobs begin one minute into simulated time.
  long firstJobStartTime = now + 60000;
  JobStoryProducer jobStoryProducer = new SimulatorJobStoryProducer(
      new Path(traceFile), cluster, firstJobStartTime, jobConf);
  jc = new SimulatorJobClient(jt, jobStoryProducer);
  queue.addAll(jc.init(firstJobStartTime));

  startTaskTrackers(cluster, now);

  terminateTime = getConf().getLong("mumak.terminate.time", Long.MAX_VALUE);
  if (terminateTime <= 0) {
    throw new IllegalArgumentException(
        "Terminate time must be positive: " + terminateTime);
  }
}
Initializes the components used in the simulation.
33,701
/**
 * Computes the Jenkins hash of the specified file and prints its absolute
 * value to stdout. Usage: JenkinsHash filename
 *
 * Fix: the input stream was previously never closed; it is now released in
 * a finally block.
 *
 * @param args a single element: the path of the file to hash
 * @throws IOException if the file cannot be read
 */
public static void main(String[] args) throws IOException {
  if (args.length != 1) {
    System.err.println("Usage: JenkinsHash filename");
    System.exit(-1);
  }
  FileInputStream in = new FileInputStream(args[0]);
  try {
    byte[] bytes = new byte[512];
    int value = 0;
    JenkinsHash hash = new JenkinsHash();
    // Chain the hash of each buffer-full, seeding with the previous value.
    for (int length = in.read(bytes); length > 0; length = in.read(bytes)) {
      value = hash.hash(bytes, length, value);
    }
    System.out.println(Math.abs(value));
  } finally {
    in.close();
  }
}
Compute the hash of the specified file
33,702
/**
 * Sets the start time of the task; non-positive values are rejected and
 * logged with a stack trace identifying the caller.
 */
void setStartTime(long startTime) {
  if (startTime <= 0) {
    // Keep the existing value and record where the bad value came from.
    LOG.error("Trying to set illegal startTime for task : " + taskid
        + ".Stack trace is : "
        + StringUtils.stringifyException(new Exception()));
    return;
  }
  this.startTime = startTime;
}
Set startTime of the task if start time is greater than zero .
33,703
/**
 * Sets the current phase of this task, stamping the shuffle or sort finish
 * time when the phase transition implies that stage just ended.
 */
void setPhase(Phase phase) {
  TaskStatus.Phase previousPhase = getPhase();
  if (previousPhase != phase) {
    if (phase == TaskStatus.Phase.SORT) {
      // Entering SORT means the shuffle has just completed.
      setShuffleFinishTime(JobTracker.getClock().getTime());
    } else if (phase == TaskStatus.Phase.REDUCE) {
      // Entering REDUCE means the sort has just completed.
      setSortFinishTime(JobTracker.getClock().getTime());
    }
  }
  this.phase = phase;
}
Set current phase of this task .
33,704
/**
 * Updates the run state, progress, state string and phase of this task
 * status; the finish time is only recorded when positive.
 */
synchronized void statusUpdate(State runState, float progress, String state,
    Phase phase, long finishTime) {
  setRunState(runState);
  setProgress(progress);
  setStateString(state);
  setPhase(phase);
  if (finishTime > 0) {
    setFinishTime(finishTime);
  }
}
Update specific fields of task status
33,705
/**
 * Verifies that the MD5 previously saved alongside {@code dataFile}
 * matches {@code expectedMD5}.
 *
 * Fix: the mismatch message was missing its closing parenthesis.
 *
 * @throws IOException if the stored hash is absent or differs
 */
public static void verifySavedMD5(File dataFile, MD5Hash expectedMD5)
    throws IOException {
  MD5Hash storedHash = readStoredMd5ForFile(dataFile);
  // A null storedHash (no digest file) also fails the equals check.
  if (!expectedMD5.equals(storedHash)) {
    throw new IOException("File " + dataFile
        + " did not match stored MD5 checksum " + " (stored: " + storedHash
        + ", computed: " + expectedMD5 + ")");
  }
}
Verify that the previously saved md5 for the given file matches expectedMd5 .
33,706
/**
 * Reads the MD5 checksum stored alongside the given file, or returns null
 * if no digest file exists.
 *
 * @throws IOException if the digest file is unreadable, malformed, or
 *         references a different file name
 */
public static MD5Hash readStoredMd5ForFile(File dataFile) throws IOException {
  File md5File = getDigestFileForFile(dataFile);
  String md5Line;
  if (!md5File.exists()) {
    return null;
  }
  BufferedReader reader = new BufferedReader(new FileReader(md5File));
  try {
    String line = reader.readLine();
    // Treat an empty file as an empty line; trim stray whitespace.
    md5Line = (line == null) ? "" : line.trim();
  } catch (IOException ioe) {
    throw new IOException("Error reading md5 file at " + md5File, ioe);
  } finally {
    IOUtils.cleanup(LOG, reader);
  }
  Matcher matcher = LINE_REGEX.matcher(md5Line);
  if (!matcher.matches()) {
    throw new IOException("Invalid MD5 file at " + md5File
        + " (does not match expected pattern)");
  }
  String storedHash = matcher.group(1);
  File referencedFile = new File(matcher.group(2));
  // The digest line must name the file it claims to describe.
  if (!referencedFile.getName().equals(dataFile.getName())) {
    throw new IOException("MD5 file at " + md5File + " references file named "
        + referencedFile.getName() + " but we expected it to reference "
        + dataFile);
  }
  return new MD5Hash(storedHash);
}
Read the md5 checksum stored alongside the given file or null if no md5 is stored .
33,707
/**
 * Reads {@code dataFile} in full and computes its MD5 checksum.
 *
 * @throws IOException if the file cannot be read
 */
public static MD5Hash computeMd5ForFile(File dataFile) throws IOException {
  InputStream in = new FileInputStream(dataFile);
  try {
    MessageDigest md5 = MD5Hash.getDigester();
    DigestInputStream digestStream = new DigestInputStream(in, md5);
    // Drain the file through the digest; the sink discards the bytes.
    IOUtils.copyBytes(digestStream, new IOUtils.NullOutputStream(),
        128 * 1024, false);
    return new MD5Hash(md5.digest());
  } finally {
    IOUtils.closeStream(in);
  }
}
Read dataFile and compute its MD5 checksum .
33,708
/**
 * Saves the ".md5" file that lists the md5sum of another file, using the
 * conventional md5sum format "&lt;hex&gt; *&lt;filename&gt;\n".
 */
public static void saveMD5File(File dataFile, MD5Hash digest)
    throws IOException {
  File md5File = getDigestFileForFile(dataFile);
  String hexDigest = StringUtils.byteToHexString(digest.getDigest());
  String md5Line = hexDigest + " *" + dataFile.getName() + "\n";
  AtomicFileOutputStream afos = new AtomicFileOutputStream(md5File);
  // NOTE(review): if write() throws, the stream is neither closed nor
  // aborted — confirm AtomicFileOutputStream's cleanup semantics.
  afos.write(md5Line.getBytes());
  afos.close();
  LOG.info("Saved MD5 " + digest + " to " + md5File);
}
Save the . md5 file that lists the md5sum of another file .
33,709
/**
 * Compares two VIntWritables by their int value.
 *
 * @return a negative, zero, or positive int as this value is less than,
 *         equal to, or greater than the other's
 */
public int compareTo(Object o) {
  int mine = this.value;
  int theirs = ((VIntWritable) o).value;
  if (mine < theirs) {
    return -1;
  }
  return (mine == theirs) ? 0 : 1;
}
Compares two VIntWritables .
33,710
/**
 * Increments the running count for the given op code, starting at 1 for an
 * op code not seen before.
 *
 * Fix: replaced the containsKey()/get()/put() sequence (three lookups) with
 * a single get() followed by put(); a null result means "not seen yet".
 */
private void incrementOpCodeCount(Byte opCode) {
  Long current = opCodeCount.get(opCode);
  opCodeCount.put(opCode, current == null ? 1L : current + 1);
}
Increment the op code counter
33,711
/**
 * Reads the next operation from the stream, preferring a previously
 * pushed-back (cached) operation when one exists.
 */
public FSEditLogOp readOp() throws IOException {
  if (cachedOp == null) {
    return nextOp();
  }
  FSEditLogOp pushedBack = cachedOp;
  cachedOp = null;
  return pushedBack;
}
Read an operation from the stream
33,712
/**
 * Skips edit log operations with transaction IDs below {@code txid}.
 * The first operation at or past {@code txid} is pushed back so the next
 * readOp() returns it.
 *
 * @return true if such an operation was found, false at end of log
 */
public boolean skipUntil(long txid) throws IOException {
  for (FSEditLogOp op = readOp(); op != null; op = readOp()) {
    if (op.getTransactionId() >= txid) {
      cachedOp = op;  // push back for the next readOp()
      return true;
    }
  }
  return false;
}
Skip edit log operations up to a given transaction ID or until the end of the edit log is reached .
33,713
/**
 * Returns the directory to which skipped records are written: the
 * explicitly configured path, or by default the "_logs/skip" subdirectory
 * of the job output path. Returns null when the configured value is
 * "none" or no output path exists.
 */
public static Path getSkipOutputPath(Configuration conf) {
  String configured = conf.get(OUT_PATH);
  if (configured != null) {
    // "none" is the sentinel for "do not write skipped records".
    return "none".equals(configured) ? null : new Path(configured);
  }
  Path outPath = FileOutputFormat.getOutputPath(new JobConf(conf));
  if (outPath == null) {
    return null;
  }
  return new Path(outPath, "_logs" + Path.SEPARATOR + "skip");
}
Get the directory to which skipped records are written. By default it is a subdirectory of the output _logs directory. Users can stop skipped records from being written by setting the value to null.
33,714
/**
 * Sets the directory to which skipped records are written; a null path
 * stores the sentinel "none", which disables writing skipped records.
 */
public static void setSkipOutputPath(JobConf conf, Path path) {
  conf.set(OUT_PATH, path == null ? "none" : path.toString());
}
Set the directory to which skipped records are written . By default it is the sub directory of the output _logs directory . User can stop writing skipped records by setting the value null .
33,715
/**
 * Sorts the buffered key pointers via MergeSort and returns an iterator
 * over the sorted key/value data, or null when nothing is buffered.
 */
public RawKeyValueIterator sort() {
  MergeSort sorter = new MergeSort(this);
  int bufferedCount = super.count;
  if (bufferedCount == 0) {
    return null;
  }
  int[] pointers = super.pointers;
  // mergeSort needs a scratch copy of the pointer array.
  int[] scratch = new int[bufferedCount];
  System.arraycopy(pointers, 0, scratch, 0, bufferedCount);
  sorter.mergeSort(pointers, scratch, 0, bufferedCount);
  return new MRSortResultIterator(super.keyValBuffer, scratch,
      super.startOffsets, super.keyLengths, super.valueLengths);
}
The sort method derived from BasicTypeSorterBase and overridden here
33,716
/**
 * Serializes this instance as a JSON object with a single "request" field.
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();
  jsonGenerator.writeObjectField("request", request);
  jsonGenerator.writeEndObject();
}
This method writes the ResourceRequestInfo instance to disk
33,717
/**
 * Writes one byte: folds it into the running checksum, buffers it, and
 * flushes once the buffered bytes complete the current chunk.
 */
public synchronized void write(int b) throws IOException {
  eventStartWrite();
  try {
    sum.update(b);
    buf[count++] = (byte) b;
    // Chunk boundary reached: push the buffer out.
    if (bytesSentInChunk + count == buf.length) {
      flushBuffer(true, shouldKeepPartialChunkData());
    }
  } finally {
    eventEndWrite();
  }
}
Write one byte
33,718
/**
 * Writes a portion of an array, flushing to the underlying stream at most
 * once if necessary.
 *
 * @return the number of bytes consumed from {@code b}
 */
private int write1(byte b[], int off, int len) throws IOException {
  eventStartWrite();
  try {
    if (count == 0 && bytesSentInChunk + len >= buf.length) {
      // Buffer is empty and the input alone completes the chunk:
      // checksum and send directly, bypassing the buffer.
      final int chunkRemainder = buf.length - bytesSentInChunk;
      sum.update(b, off, chunkRemainder);
      writeChecksumChunk(b, off, chunkRemainder, false);
      bytesSentInChunk = 0;
      return chunkRemainder;
    }
    // Otherwise copy as much as fits into the remaining buffer space.
    int toCopy = buf.length - bytesSentInChunk - count;
    if (len < toCopy) {
      toCopy = len;
    }
    sum.update(b, off, toCopy);
    System.arraycopy(b, off, buf, count, toCopy);
    count += toCopy;
    if (count + bytesSentInChunk == buf.length) {
      flushBuffer(true, shouldKeepPartialChunkData());
    }
    return toCopy;
  } finally {
    eventEndWrite();
  }
}
Write a portion of an array flushing to the underlying stream at most once if necessary .
33,719
/**
 * Generates the checksum for a data chunk and writes the chunk plus its
 * checksum to the underlying stream. When {@code keep} is true the running
 * checksum is left intact instead of being reset.
 */
private void writeChecksumChunk(byte b[], int off, int len, boolean keep)
    throws IOException {
  int chunkChecksum = (int) sum.getValue();
  if (!keep) {
    sum.reset();
  }
  int2byte(chunkChecksum, checksum);
  writeChunk(b, off, len, checksum);
}
Generate the checksum for the data chunk and output the data chunk and checksum to the underlying output stream. If keep is true, keep the current checksum intact; do not reset it.
33,720
/** Returns the user name decoded from the packed permission field. */
public String getUserName() {
  int serial = (int) PermissionStatusFormat.USER.retrieve(permission);
  return SerialNumberManager.INSTANCE.getUser(serial);
}
Get user name
33,721
/** Returns the group name decoded from the packed permission field. */
public String getGroupName() {
  int serial = (int) PermissionStatusFormat.GROUP.retrieve(permission);
  return SerialNumberManager.INSTANCE.getGroup(serial);
}
Get group name
33,722
/**
 * Verifies that the file uses regular storage; otherwise logs {@code msg}
 * and throws it as an IOException.
 */
public static void enforceRegularStorageINode(INodeFile inode, String msg)
    throws IOException {
  if (inode.getStorageType() == StorageType.REGULAR_STORAGE) {
    return;
  }
  LOG.error(msg);
  throw new IOException(msg);
}
Verify if file is regular storage otherwise throw an exception
33,723
/**
 * Converts path-component strings to byte arrays. An empty input yields a
 * single-element array containing null.
 */
static byte[][] getPathComponents(String[] strings) {
  if (strings.length == 0) {
    return new byte[][]{null};
  }
  byte[][] components = new byte[strings.length][];
  for (int i = 0; i < components.length; i++) {
    components[i] = DFSUtil.string2Bytes(strings[i]);
  }
  return components;
}
Convert strings to byte arrays for path components .
33,724
/**
 * Breaks an absolute file path into its component names; returns null for
 * a null or non-absolute path.
 */
static String[] getPathNames(String path) {
  if (path == null || !path.startsWith(Path.SEPARATOR)) {
    return null;  // only absolute paths are accepted
  }
  return StringUtils.split(path, Path.SEPARATOR_CHAR);
}
Breaks file path into names .
33,725
/**
 * Compares this inode's name to {@code name2} byte-by-byte; a null array
 * is treated as empty. Returns the first differing byte difference, or the
 * length difference when one name is a prefix of the other.
 */
public final int compareTo(byte[] name2) {
  if (name == name2) {
    return 0;  // covers both-null and identical-reference cases
  }
  int len1 = (name == null) ? 0 : name.length;
  int len2 = (name2 == null) ? 0 : name2.length;
  int limit = Math.min(len1, len2);
  for (int i = 0; i < limit; i++) {
    int diff = name[i] - name2[i];
    if (diff != 0) {
      return diff;
    }
  }
  return len1 - len2;
}
Compare names of the inodes
33,726
/**
 * Creates an INode of the requested type from image data; the inode's name
 * is not set yet. A regular inode with no blocks is a directory (with or
 * without quota); otherwise a file, hard-linked file, or raided file is
 * built according to {@code inodeType}.
 *
 * Fix: corrected the typo "Invalide inode type" in the exception message.
 *
 * @throws IllegalArgumentException for an unknown {@code inodeType}
 */
static INode newINode(long id, PermissionStatus permissions,
    BlockInfo[] blocks, short replication, long modificationTime, long atime,
    long nsQuota, long dsQuota, long preferredBlockSize, byte inodeType,
    long hardLinkID, RaidCodec codec, FSImageLoadingContext context) {
  if (inodeType == INode.INodeType.REGULAR_INODE.type) {
    if (blocks == null) {
      // No blocks: this is a directory.
      if (nsQuota >= 0 || dsQuota >= 0) {
        return new INodeDirectoryWithQuota(id, permissions, modificationTime,
            nsQuota, dsQuota);
      }
      return new INodeDirectory(id, permissions, modificationTime);
    }
    return new INodeFile(id, permissions, blocks, replication,
        modificationTime, atime, preferredBlockSize, null);
  } else if (inodeType == INode.INodeType.HARDLINKED_INODE.type) {
    HardLinkFileInfo hardLinkFileInfo =
        INodeHardLinkFile.loadHardLinkFileInfo(hardLinkID, context);
    if (hardLinkFileInfo.getReferenceCnt() > 0) {
      // Share the block list with the first file of the hard-link group.
      blocks = hardLinkFileInfo.getHardLinkedFile(0).getBlocks();
    }
    INodeHardLinkFile hardLinkFile = new INodeHardLinkFile(id, permissions,
        blocks, replication, modificationTime, atime, preferredBlockSize,
        hardLinkFileInfo);
    hardLinkFile.incReferenceCnt();
    return hardLinkFile;
  } else if (inodeType == INode.INodeType.RAIDED_INODE.type) {
    return new INodeFile(id, permissions, blocks, replication,
        modificationTime, atime, preferredBlockSize, codec);
  } else {
    throw new IllegalArgumentException("Invalid inode type: " + inodeType);
  }
}
Create an INode; the inode's name is not set yet.
33,727
/**
 * Blocks until the upgrade of the given namespace completes, reporting
 * thread liveness while polling. If interrupted, re-interrupts the block
 * scanner thread and returns early.
 */
private void waitForUpgradeDone(int namespaceId) {
  UpgradeManagerDatanode upgradeManager =
      datanode.getUpgradeManager(namespaceId);
  while (!upgradeManager.isUpgradeCompleted()) {
    try {
      datanode.updateAndReportThreadLiveness(BackgroundThread.BLOCK_SCANNER);
      Thread.sleep(5000);
      LOG.info("sleeping ............");
    } catch (InterruptedException e) {
      blockScannerThread.interrupt();
      return;
    }
  }
}
Wait for upgrading done for the given namespace
33,728
/**
 * Finds the next namespace to scan and returns its scanner. The namespace
 * whose current verification log file was modified most recently is
 * preferred; when no such file exists, selection falls back to round-robin
 * order after {@code currentNamespaceId}. Waits and retries while no
 * namespace is up; returns null when the datanode stops or the scanner
 * thread is interrupted.
 */
private DataBlockScanner getNextNamespaceSliceScanner(int currentNamespaceId) {
  Integer nextNsId = null;
  while (nextNsId == null && datanode.shouldRun
      && !blockScannerThread.isInterrupted()) {
    waitForOneNameSpaceUp();
    synchronized (this) {
      if (getNamespaceSetSize() > 0) {
        // Prefer the namespace with the most recently modified
        // verification log across all volumes.
        long lastScanTime = -1;
        Iterator<Integer> nsidIterator =
            namespaceScannerMap.keySet().iterator();
        while (nsidIterator.hasNext()) {
          int nsid = nsidIterator.next();
          for (FSDataset.FSVolume vol : dataset.volumes.getVolumes()) {
            try {
              File currFile = DataBlockScanner.getCurrentFile(vol, nsid);
              if (currFile.exists()) {
                long lastModified = currFile.lastModified();
                if (lastScanTime < lastModified) {
                  lastScanTime = lastModified;
                  nextNsId = nsid;
                }
              }
            } catch (IOException e) {
              LOG.warn("Received exception: ", e);
            }
          }
        }
        if (nextNsId == null) {
          // No current verification log found: round-robin fallback.
          try {
            if (currentNamespaceId == -1) {
              nextNsId = namespaceScannerMap.firstKey();
            } else {
              nextNsId = namespaceScannerMap.higherKey(currentNamespaceId);
              if (nextNsId == null) {
                nextNsId = namespaceScannerMap.firstKey();
              }
            }
          } catch (NoSuchElementException e) {
            continue;  // map emptied under us; retry the outer loop
          }
        }
        if (nextNsId != null) {
          return getNSScanner(nextNsId);
        }
      }
    }
    LOG.warn("No namespace is up, going to wait");
    try {
      Thread.sleep(5000);
    } catch (InterruptedException ex) {
      LOG.warn("Received exception: " + ex);
      blockScannerThread.interrupt();
      return null;
    }
  }
  return null;
}
Find next namespaceId to scan . There should be only one current verification log file . Find which namespace contains the current verification log file and that is used as the starting namespaceId . If no current files are found start with first namespace . However if more than one current files are found the one with latest modification time is used to find the next namespaceId .
33,729
/**
 * Hashes the specified key into {@code nbHash} bucket indices in
 * [0, maxValue), chaining each round's hash as the next round's seed.
 *
 * @throws NullPointerException if the key's byte buffer is null
 * @throws IllegalArgumentException if the key is empty
 */
public int[] hash(Key k) {
  byte[] keyBytes = k.getBytes();
  if (keyBytes == null) {
    throw new NullPointerException("buffer reference is null");
  }
  if (keyBytes.length == 0) {
    throw new IllegalArgumentException("key length must be > 0");
  }
  int[] result = new int[nbHash];
  int seed = 0;
  for (int i = 0; i < nbHash; i++) {
    seed = hashFunction.hash(keyBytes, seed);
    result[i] = Math.abs(seed % maxValue);
  }
  return result;
}
Hashes a specified key into several integers .
33,730
/**
 * Creates the required listener (connector) for the Jetty instance:
 * blocking I/O when "hadoop.http.bio" is set, NIO otherwise. Shared idle
 * and buffer settings are applied to either.
 */
protected Connector createBaseListener(Configuration conf) throws IOException {
  Connector connector;
  if (conf.getBoolean("hadoop.http.bio", false)) {
    SocketConnector bioConn = new SocketConnector();
    bioConn.setAcceptQueueSize(4096);
    bioConn.setResolveNames(false);
    connector = bioConn;
  } else {
    SelectChannelConnector nioConn = new SelectChannelConnector();
    nioConn.setAcceptQueueSize(128);
    nioConn.setResolveNames(false);
    nioConn.setUseDirectBuffers(false);
    connector = nioConn;
  }
  connector.setLowResourceMaxIdleTime(10000);
  connector.setHeaderBufferSize(
      conf.getInt("hadoop.http.header.buffer.size", 4096));
  connector.setMaxIdleTime(conf.getInt("dfs.http.timeout", 200000));
  return connector;
}
Create a required listener for the Jetty instance listening on the port provided . This wrapper and all subclasses must create at least one listener .
33,731
/**
 * Returns the FilterInitializer instances named by the configuration, or
 * null when no configuration or no classes are specified.
 */
private static FilterInitializer[] getFilterInitializers(Configuration conf) {
  if (conf == null) {
    return null;
  }
  Class<?>[] classes = conf.getClasses(FILTER_INITIALIZER_PROPERTY);
  if (classes == null) {
    return null;
  }
  FilterInitializer[] initializers = new FilterInitializer[classes.length];
  for (int i = 0; i < classes.length; i++) {
    initializers[i] =
        (FilterInitializer) ReflectionUtils.newInstance(classes[i], conf);
  }
  return initializers;
}
Get an array of FilterConfiguration specified in the conf
33,732
/**
 * Adds the default web contexts: /logs (only when hadoop.log.dir is set)
 * and /static, both served by StaticServlet.
 */
protected void addDefaultApps(ContextHandlerCollection parent,
    final String appDir) throws IOException {
  String logDir = System.getProperty("hadoop.log.dir");
  if (logDir != null) {
    Context logContext = new Context(parent, "/logs");
    logContext.setResourceBase(logDir);
    logContext.addServlet(StaticServlet.class, "/");
    defaultContexts.put(logContext, true);
  }
  Context staticContext = new Context(parent, "/static");
  staticContext.setResourceBase(appDir + "/static");
  staticContext.addServlet(StaticServlet.class, "/*");
  defaultContexts.put(staticContext, true);
}
Add default apps .
33,733
/**
 * Registers the standard diagnostic servlets: stacks, logLevel, jmx,
 * metrics, and conf.
 */
protected void addDefaultServlets() {
  addServlet("stacks", "/stacks", StackServlet.class);
  addServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
  addServlet("jmx", "/jmx", JMXJsonServlet.class);
  addServlet("metrics", "/metrics", MetricsServlet.class);
  addServlet("conf", "/conf", ConfServlet.class);
}
Add default servlets .
33,734
/**
 * Adds a web-app context for the given war/directory at {@code pathSpec}.
 *
 * Fix: the {@code isFiltered} parameter was accepted but ignored (the
 * delegate was always called with {@code true}); it is now passed through.
 *
 * @throws RuntimeException if the web server has no handlers yet
 */
protected void addContext(String pathSpec, String dir, boolean isFiltered)
    throws IOException {
  if (0 == webServer.getHandlers().length) {
    throw new RuntimeException("Couldn't find handler");
  }
  WebAppContext webAppCtx = new WebAppContext();
  webAppCtx.setContextPath(pathSpec);
  webAppCtx.setWar(dir);
  addContext(webAppCtx, isFiltered);
}
Add a context
33,735
/**
 * Adds a servlet to the server and attaches the configured filters to its
 * path spec.
 */
public void addServlet(String name, String pathSpec,
    Class<? extends HttpServlet> clazz) {
  addInternalServlet(name, pathSpec, clazz);
  addFilterPathMapping(pathSpec, webAppContext);
}
Add a servlet in the server .
33,736
/**
 * Adds a servlet to the server without attaching the configured filters.
 * The holder's name is only set when {@code name} is non-null.
 */
public void addInternalServlet(String name, String pathSpec,
    Class<? extends HttpServlet> clazz) {
  ServletHolder holder = new ServletHolder(clazz);
  if (name != null) {
    holder.setName(name);
  }
  webAppContext.addServlet(holder, pathSpec);
}
Add an internal servlet in the server .
33,737
/**
 * Removes a servlet from the server: rebuilds the filter-mapping list and
 * then removes the servlet itself.
 *
 * Fix: the original inner loop added a surviving mapping once per
 * non-matching path spec, so a mapping declaring several specs was
 * duplicated; each mapping is now kept at most once (when it has at least
 * one spec different from {@code pathSpec}, matching the original keep
 * criterion).
 */
public void removeServlet(String name, String pathSpec,
    Class<? extends HttpServlet> clazz) {
  if (clazz == null) {
    return;
  }
  ServletHandler servletHandler = webAppContext.getServletHandler();
  List<FilterMapping> newFilterMappings = new ArrayList<FilterMapping>();
  for (FilterMapping mapping : servletHandler.getFilterMappings()) {
    boolean keep = false;
    for (String mappingPathSpec : mapping.getPathSpecs()) {
      if (!mappingPathSpec.equals(pathSpec)) {
        keep = true;
        break;
      }
    }
    if (keep) {
      newFilterMappings.add(mapping);
    }
  }
  servletHandler.setFilterMappings(newFilterMappings
      .toArray(new FilterMapping[newFilterMappings.size()]));
  removeInternalServlet(name, pathSpec, clazz);
}
Remove a servlet in the server .
33,738
/**
 * Removes an internal servlet from the server: drops the servlet holders
 * matching the given class and name, then drops their mappings for
 * {@code pathSpec}. No-op when nothing matches.
 *
 * Fix: the error log message was missing a space between "through" and
 * "servlet holders".
 */
public void removeInternalServlet(String name, String pathSpec,
    Class<? extends HttpServlet> clazz) {
  if (null == clazz) {
    return;
  }
  ServletHandler servletHandler = webAppContext.getServletHandler();
  List<ServletHolder> newServletHolders = new ArrayList<ServletHolder>();
  List<ServletMapping> newServletMappings = new ArrayList<ServletMapping>();
  String clazzName = clazz.getName();
  Set<String> holdersToRemove = new HashSet<String>();
  for (ServletHolder holder : servletHandler.getServlets()) {
    try {
      if (clazzName.equals(holder.getServlet().getClass().getName())
          && name.equals(holder.getName())) {
        holdersToRemove.add(holder.getName());
      } else {
        newServletHolders.add(holder);
      }
    } catch (ServletException e) {
      LOG.error("exception in removeInternalServlet() when iterating through "
          + "servlet holders" + StringUtils.stringifyException(e));
    }
  }
  if (holdersToRemove.size() < 1) {
    return;  // nothing matched; leave the handler untouched
  }
  for (ServletMapping mapping : servletHandler.getServletMappings()) {
    if (!holdersToRemove.contains(mapping.getServletName())) {
      newServletMappings.add(mapping);
      continue;
    }
    // Mapping belongs to a removed holder: keep it only when it does not
    // map the path spec being removed.
    String[] pathSpecs = mapping.getPathSpecs();
    boolean pathSpecMatched = false;
    if (pathSpecs != null && pathSpecs.length > 0) {
      for (String pathSpecInMapping : pathSpecs) {
        if (pathSpecInMapping.equals(pathSpec)) {
          pathSpecMatched = true;
          break;
        }
      }
    }
    if (!pathSpecMatched) {
      newServletMappings.add(mapping);
    }
  }
  servletHandler.setServletMappings(newServletMappings
      .toArray(new ServletMapping[newServletMappings.size()]));
  servletHandler.setServlets(newServletHolders
      .toArray(new ServletHolder[newServletHolders.size()]));
}
Remove an internal servlet in the server .
33,739
/**
 * Defines a filter for a context and maps it onto the given URLs for all
 * dispatch types.
 */
protected void defineFilter(Context ctx, String name, String classname,
    Map<String, String> parameters, String[] urls) {
  FilterHolder holder = new FilterHolder();
  holder.setName(name);
  holder.setClassName(classname);
  holder.setInitParameters(parameters);
  FilterMapping mapping = new FilterMapping();
  mapping.setPathSpecs(urls);
  mapping.setDispatches(Handler.ALL);
  mapping.setFilterName(name);
  ctx.getServletHandler().addFilter(holder, mapping);
}
Define a filter for a context and set up default url mappings .
33,740
/**
 * Maps every registered filter name onto the given path spec for all
 * dispatch types.
 */
protected void addFilterPathMapping(String pathSpec, Context webAppCtx) {
  ServletHandler handler = webAppCtx.getServletHandler();
  for (String filterName : filterNames) {
    FilterMapping mapping = new FilterMapping();
    mapping.setPathSpec(pathSpec);
    mapping.setFilterName(filterName);
    mapping.setDispatches(Handler.ALL);
    handler.addFilterMapping(mapping);
  }
}
Add the path spec to the filter path mapping .
33,741
/**
 * Returns the classpath location of the "webapps" resource directory.
 *
 * @throws IOException if the resource is not on the classpath
 */
protected String getWebAppsPath() throws IOException {
  URL url = getClass().getClassLoader().getResource("webapps");
  if (url == null) {
    throw new IOException("webapps not found in CLASSPATH");
  }
  return url.toString();
}
Get the pathname to the webapps files .
33,742
/**
 * Stops the server: closes the listener, detaches the web-app context, and
 * stops the web server.
 *
 * Fix: previously a failure in an early step (e.g. closing the listener)
 * skipped all later cleanup; each step is now attempted and the first
 * failure rethrown afterwards.
 */
public void stop() throws Exception {
  Exception firstFailure = null;
  try {
    listener.close();
  } catch (Exception e) {
    firstFailure = e;
  }
  try {
    webAppContext.clearAttributes();
    webServer.removeHandler(webAppContext);
  } catch (Exception e) {
    if (firstFailure == null) {
      firstFailure = e;
    }
  }
  try {
    webServer.stop();
  } catch (Exception e) {
    if (firstFailure == null) {
      firstFailure = e;
    }
  }
  if (firstFailure != null) {
    throw firstFailure;
  }
}
stop the server
33,743
/**
 * Convenience accessor for subclasses: reads the factory attribute named
 * "&lt;contextName&gt;.&lt;attributeName&gt;".
 */
protected String getAttribute(String attributeName) {
  return (String) factory.getAttribute(contextName + "." + attributeName);
}
Convenience method for subclasses to access factory attributes .
33,744
/**
 * Registers a callback to be invoked at configured time intervals;
 * registering the same updater twice has no effect.
 */
public synchronized void registerUpdater(final Updater updater) {
  if (updaters.containsKey(updater)) {
    return;  // already registered
  }
  updaters.put(updater, Boolean.TRUE);
}
Registers a callback to be called at time intervals determined by the configuration .
33,745
/**
 * Starts the periodic monitoring timer as a daemon thread, if it is not
 * already running.
 */
private synchronized void startTimer() {
  if (timer != null) {
    return;  // already started
  }
  timer = new Timer("Timer thread for monitoring " + getContextName(), true);
  TimerTask task = new TimerTask() {
    public void run() {
      try {
        timerEvent();
      } catch (IOException ioe) {
        ioe.printStackTrace();
      }
    }
  };
  long millis = period * 1000;
  timer.scheduleAtFixedRate(task, millis, millis);
}
Starts timer if it is not already started
33,746
/**
 * Timer callback: when monitoring is enabled, runs every registered
 * updater (isolating their failures) and then emits buffered records.
 */
private void timerEvent() throws IOException {
  if (!isMonitoring) {
    return;
  }
  Collection<Updater> snapshot;
  synchronized (this) {
    // Copy so updaters can (de)register while we iterate.
    snapshot = new ArrayList<Updater>(updaters.keySet());
  }
  for (Updater updater : snapshot) {
    try {
      updater.doUpdates(this);
    } catch (Throwable throwable) {
      throwable.printStackTrace();
    }
  }
  emitRecords();
}
Timer callback .
33,747
/**
 * Emits every buffered record to the underlying sink and flushes.
 *
 * Fix: iterates over entrySet() instead of keySet() followed by get(),
 * avoiding a second map lookup per record name.
 */
private synchronized void emitRecords() throws IOException {
  for (Entry<String, RecordMap> recordEntry : bufferedData.entrySet()) {
    String recordName = recordEntry.getKey();
    RecordMap recordMap = recordEntry.getValue();
    synchronized (recordMap) {
      for (Entry<TagMap, MetricMap> entry : recordMap.entrySet()) {
        OutputRecord outRec =
            new OutputRecord(entry.getKey(), entry.getValue());
        emitRecord(contextName, recordName, outRec);
      }
    }
  }
  flush();
}
Emits the records .
33,748
/**
 * Adds two numbers, coercing the second to the type of the first.
 *
 * Fix: the Float branch used the deprecated boxing constructor
 * {@code new Float(...)}; it now uses {@code Float.valueOf(...)},
 * consistent with the other branches.
 *
 * @throws MetricsException if the first operand's type is unsupported
 */
private Number sum(Number a, Number b) {
  if (a instanceof Integer) {
    return Integer.valueOf(a.intValue() + b.intValue());
  } else if (a instanceof Float) {
    return Float.valueOf(a.floatValue() + b.floatValue());
  } else if (a instanceof Short) {
    return Short.valueOf((short) (a.shortValue() + b.shortValue()));
  } else if (a instanceof Byte) {
    return Byte.valueOf((byte) (a.byteValue() + b.byteValue()));
  } else if (a instanceof Long) {
    return Long.valueOf((a.longValue() + b.longValue()));
  } else {
    throw new MetricsException("Invalid number type");
  }
}
Adds two numbers coercing the second to the type of the first .
33,749
/**
 * Overrides the default period with the value of the named attribute, when
 * present.
 *
 * @throws MetricsException if the attribute value is not a positive integer
 */
protected void parseAndSetPeriod(String attributeName) {
  String periodStr = getAttribute(attributeName);
  if (periodStr == null) {
    return;  // attribute absent: keep the default period
  }
  int parsed = 0;
  try {
    parsed = Integer.parseInt(periodStr);
  } catch (NumberFormatException nfe) {
    // Deliberately ignored: a non-numeric value leaves parsed at 0 and
    // falls through to the invalid-period error below.
  }
  if (parsed <= 0) {
    throw new MetricsException("Invalid period: " + periodStr);
  }
  setPeriod(parsed);
}
If a period is set in the attribute passed in override the default with it .
33,750
/**
 * Retrieves all records managed by this MetricsContext, keyed and sorted
 * by record name; useful for polling-based monitoring systems.
 */
public synchronized Map<String, Collection<OutputRecord>> getAllRecords() {
  Map<String, Collection<OutputRecord>> out =
      new TreeMap<String, Collection<OutputRecord>>();
  for (String recordName : bufferedData.keySet()) {
    RecordMap recordMap = bufferedData.get(recordName);
    synchronized (recordMap) {
      List<OutputRecord> records = new ArrayList<OutputRecord>();
      for (Entry<TagMap, MetricMap> entry : recordMap.entrySet()) {
        records.add(new OutputRecord(entry.getKey(), entry.getValue()));
      }
      out.put(recordName, records);
    }
  }
  return out;
}
Retrieves all the records managed by this MetricsContext. Useful for monitoring systems that are polling-based.
33,751
/**
 * Dispatches a simulation event to the matching handler.
 *
 * @throws IllegalArgumentException for event types this tracker does not
 *         handle
 */
public List<SimulatorEvent> accept(SimulatorEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Accepted event " + event);
  }
  if (event instanceof HeartbeatEvent) {
    return processHeartbeatEvent((HeartbeatEvent) event);
  }
  if (event instanceof TaskAttemptCompletionEvent) {
    return processTaskAttemptCompletionEvent(
        (TaskAttemptCompletionEvent) event);
  }
  throw new IllegalArgumentException("Unhandled event " + event);
}
Processes a simulation event .
33,752
/**
 * Called once at the start of the simulation; schedules this tracker's
 * first heartbeat at the current simulation time.
 */
public List<SimulatorEvent> init(long when) {
  LOG.debug("TaskTracker starting up, current simulation time=" + when);
  HeartbeatEvent firstHeartbeat = new HeartbeatEvent(this, when);
  return Collections.<SimulatorEvent>singletonList(firstHeartbeat);
}
Called once at the start of the simulation .
33,753
/**
 * Stops running a task attempt on this tracker: validates the final status
 * against the tracked one, records the final state, and releases the
 * attempt's map or reduce slots.
 *
 * @throws IllegalArgumentException on an unknown attempt, a non-running
 *         attempt, an invalid final state, or inconsistent status fields
 * @throws IllegalStateException if slot accounting goes negative
 */
private void finishRunningTask(TaskStatus finalStatus, long now) {
  TaskAttemptID taskId = finalStatus.getTaskID();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Finishing running task id=" + taskId + ", now=" + now);
  }
  SimulatorTaskInProgress tip = tasks.get(taskId);
  if (tip == null) {
    throw new IllegalArgumentException("Unknown task attempt " + taskId
        + " completed");
  }
  TaskStatus currentStatus = tip.getTaskStatus();
  if (currentStatus.getRunState() != State.RUNNING) {
    throw new IllegalArgumentException(
        "Task attempt to finish is not running: " + tip);
  }
  State finalRunState = finalStatus.getRunState();
  if (finalRunState != State.SUCCEEDED && finalRunState != State.FAILED
      && finalRunState != State.KILLED) {
    throw new IllegalArgumentException(
        "Final run state for completed task can't be : " + finalRunState
        + " " + tip);
  }
  if (now != finalStatus.getFinishTime()) {
    throw new IllegalArgumentException(
        "Current time does not match task finish time: now=" + now
        + ", finish=" + finalStatus.getFinishTime());
  }
  // Immutable fields must agree between the tracked and final status.
  if (currentStatus.getIsMap() != finalStatus.getIsMap()
      || currentStatus.getNumSlots() != finalStatus.getNumSlots()
      || currentStatus.getPhase() != finalStatus.getPhase()
      || currentStatus.getStartTime() != finalStatus.getStartTime()) {
    throw new IllegalArgumentException(
        "Current status does not match final status");
  }
  currentStatus.setRunState(finalStatus.getRunState());
  currentStatus.setFinishTime(finalStatus.getFinishTime());
  currentStatus.setProgress(finalStatus.getProgress());
  // Release the slots the attempt was occupying.
  int numSlots = currentStatus.getNumSlots();
  if (tip.isMapTask()) {
    usedMapSlots -= numSlots;
    if (usedMapSlots < 0) {
      throw new IllegalStateException(
          "TaskTracker reaches negative map slots: " + usedMapSlots);
    }
  } else {
    usedReduceSlots -= numSlots;
    if (usedReduceSlots < 0) {
      throw new IllegalStateException(
          "TaskTracker reaches negative reduce slots: " + usedReduceSlots);
    }
  }
}
Stops running a task attempt on the task tracker . It also updates the number of available slots accordingly .
33,754
/**
 * Records that a task attempt has completed. If the attempt was killed
 * after this completion event was created (tracked via
 * orphanTaskCompletions), the stale event is ignored.
 *
 * @param event self-scheduled completion event carrying the final status
 * @return no new simulation events
 */
private List<SimulatorEvent> processTaskAttemptCompletionEvent(
    TaskAttemptCompletionEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing task attempt completion event" + event);
  }
  long now = event.getTimeStamp();
  TaskStatus finalStatus = event.getStatus();
  TaskAttemptID taskID = finalStatus.getTaskID();
  // remove() returns true iff a kill was recorded for this attempt,
  // meaning this completion event is stale and must be dropped.
  boolean killedEarlier = orphanTaskCompletions.remove(taskID);
  if (!killedEarlier) {
    finishRunningTask(finalStatus, now);
  }
  return SimulatorEngine.EMPTY_EVENTS;
}
Records that a task attempt has completed . Ignores the event for tasks that got killed after the creation of the completion event .
33,755
/**
 * Creates a self-addressed event marking the future completion of a task
 * attempt. It assumes the attempt has made no user-space progress yet, so
 * the finish time is now plus the attempt's full user-space runtime.
 *
 * @param tip the tracked task attempt
 * @param now current simulation time
 * @return completion event scheduled at now + user-space runtime
 */
private TaskAttemptCompletionEvent createTaskAttemptCompletionEvent(
    SimulatorTaskInProgress tip, long now) {
  // Clone so the live in-progress status is not mutated ahead of time.
  TaskStatus status = (TaskStatus) tip.getTaskStatus().clone();
  long delta = tip.getUserSpaceRunTime();
  assert delta >= 0 : "TaskAttempt " + tip.getTaskStatus().getTaskID()
      + " has negative UserSpaceRunTime = " + delta;
  long finishTime = now + delta;
  status.setFinishTime(finishTime);
  status.setProgress(1.0f);
  status.setRunState(tip.getFinalRunState());
  TaskAttemptCompletionEvent event =
      new TaskAttemptCompletionEvent(this, status);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Created task attempt completion event " + event);
  }
  return event;
}
Creates a signal for itself marking the completion of a task attempt. It assumes that the task attempt hasn't made any progress in the user-space code so far, i.e. it is called right at launch for map tasks and immediately after all maps have completed for reduce tasks.
33,756
/**
 * Launches a task on the simulated task tracker: registers the attempt,
 * reserves the required slots, and for map tasks immediately schedules the
 * self-addressed completion event (maps are assumed to start user-space
 * work right away; reduces get no completion event here).
 *
 * @param action launch action carrying the task to run
 * @param now current simulation time
 * @return the map's completion event, or no events for reduces
 * @throws IllegalArgumentException if the attempt was already launched
 * @throws IllegalStateException if slot accounting exceeds the maximum
 */
private List<SimulatorEvent> handleSimulatorLaunchTaskAction(
    SimulatorLaunchTaskAction action, long now) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling launch task action " + action);
  }
  Task task = action.getTask();
  TaskAttemptID taskId = task.getTaskID();
  if (tasks.containsKey(taskId)) {
    throw new IllegalArgumentException("Multiple launch of task id =" + taskId);
  }
  // Task statuses still use the old mapred API's attempt id.
  org.apache.hadoop.mapred.TaskAttemptID taskIdOldApi =
      org.apache.hadoop.mapred.TaskAttemptID.downgrade(taskId);
  TaskStatus status;
  int numSlotsRequired = task.getNumSlotsRequired();
  Counters emptyCounters = new Counters();
  if (task.isMapTask()) {
    status = new MapTaskStatus(taskIdOldApi, 0f, numSlotsRequired,
        State.RUNNING, "", "", taskTrackerName, Phase.MAP, emptyCounters);
    usedMapSlots += numSlotsRequired;
    if (usedMapSlots > maxMapSlots) {
      throw new IllegalStateException("usedMapSlots exceeds maxMapSlots: "
          + usedMapSlots + " > " + maxMapSlots);
    }
  } else {
    status = new ReduceTaskStatus(taskIdOldApi, 0f, numSlotsRequired,
        State.RUNNING, "", "", taskTrackerName, Phase.SHUFFLE, emptyCounters);
    usedReduceSlots += numSlotsRequired;
    if (usedReduceSlots > maxReduceSlots) {
      // BUG FIX: the original message compared usedReduceSlots against
      // itself ("usedReduceSlots exceeds usedReduceSlots: x > x"); report
      // the actual limit, mirroring the map branch above.
      throw new IllegalStateException(
          "usedReduceSlots exceeds maxReduceSlots: " + usedReduceSlots
          + " > " + maxReduceSlots);
    }
  }
  status.setStartTime(now);
  SimulatorTaskInProgress tip =
      new SimulatorTaskInProgress(action, status, now);
  tasks.put(taskId, tip);
  if (task.isMapTask()) {
    // Maps begin user-space work immediately; schedule their completion.
    TaskAttemptCompletionEvent e = createTaskAttemptCompletionEvent(tip, now);
    return Collections.<SimulatorEvent>singletonList(e);
  } else {
    return SimulatorEngine.EMPTY_EVENTS;
  }
}
Launches a task on the simulated task tracker .
33,757
/**
 * Kills a task attempt: brings its progress up to now, records a KILLED
 * final status, and frees its slots. For maps (whose completion event is
 * created at launch) and reduces in the REDUCE phase (presumably with a
 * completion event already in flight — confirm against the reduce path),
 * the kill is remembered in orphanTaskCompletions so the stale completion
 * event is ignored later. Unknown attempts are silently ignored.
 *
 * @param action kill request from the job tracker
 * @param now current simulation time
 * @return no new simulation events
 */
private List<SimulatorEvent> handleKillTaskAction(KillTaskAction action,
    long now) {
  TaskAttemptID taskId = action.getTaskID();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Handling kill task action, taskId=" + taskId + ", now=" + now);
  }
  SimulatorTaskInProgress tip = tasks.get(taskId);
  if (tip == null) {
    return SimulatorEngine.EMPTY_EVENTS;
  }
  // Update progress so the final status reflects work done up to the kill.
  progressTaskStatus(tip, now);
  TaskStatus finalStatus = (TaskStatus) tip.getTaskStatus().clone();
  finalStatus.setFinishTime(now);
  finalStatus.setRunState(State.KILLED);
  finishRunningTask(finalStatus, now);
  if (finalStatus.getIsMap() || finalStatus.getPhase() == Phase.REDUCE) {
    orphanTaskCompletions.add(taskId);
  }
  return SimulatorEngine.EMPTY_EVENTS;
}
Kills a task attempt .
33,758
/**
 * Updates the progress of a task attempt as of now; no-op unless RUNNING.
 * Map progress grows linearly over the user-space runtime. Reduce progress
 * is modeled in thirds: 0 during SHUFFLE, 1/3 after SORT, and 2/3 plus a
 * linear share of the user-space runtime during REDUCE. The result is
 * clamped to [0, 1]; values outside a small tolerance indicate an internal
 * inconsistency.
 *
 * @param tip the tracked task attempt
 * @param now current simulation time
 * @throws IllegalStateException if the computed progress is out of range
 */
private void progressTaskStatus(SimulatorTaskInProgress tip, long now) {
  TaskStatus status = tip.getTaskStatus();
  if (status.getRunState() != State.RUNNING) {
    return;
  }
  boolean isMap = tip.isMapTask();
  long startTime = -1;
  long runTime = tip.getUserSpaceRunTime();
  float progress = 0.0f;
  if (isMap) {
    // Linear progress from task start over the user-space runtime.
    startTime = status.getStartTime();
    progress = ((float) (now - startTime)) / runTime;
  } else {
    Phase reducePhase = status.getPhase();
    switch (reducePhase) {
      case SHUFFLE:
        progress = 0.0f;
        break;
      case SORT:
        progress = 1.0f / 3;
        break;
      case REDUCE: {
        // The REDUCE phase starts when sort finished.
        startTime = status.getSortFinishTime();
        progress = 2.0f / 3 + (((float) (now - startTime)) / runTime) / 3.0f;
      }
        break;
      default:
        throw new IllegalArgumentException(
            "Invalid reducePhase=" + reducePhase);
    }
  }
  // Allow a tiny tolerance for floating-point error before clamping.
  final float EPSILON = 0.0001f;
  if (progress < -EPSILON || progress > 1 + EPSILON) {
    throw new IllegalStateException("Task progress out of range: " + progress);
  }
  progress = Math.max(Math.min(1.0f, progress), 0.0f);
  status.setProgress(progress);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Updated task progress, taskId=" + status.getTaskID()
        + ", progress=" + status.getProgress());
  }
}
Updates the progress indicator of a task if it is running .
33,759
/**
 * Removes bookkeeping entries for task attempts that are no longer in the
 * RUNNING state. Produces no events or logs of its own (apart from debug
 * tracing); prevents completed attempts from being reported again and
 * keeps memory bounded in long simulations.
 */
private void garbageCollectCompletedTasks() {
  Iterator<TaskAttemptID> it = tasks.keySet().iterator();
  while (it.hasNext()) {
    TaskAttemptID attemptId = it.next();
    SimulatorTaskInProgress inProgress = tasks.get(attemptId);
    boolean stillRunning =
        (inProgress.getTaskStatus().getRunState() == State.RUNNING);
    if (!stillRunning) {
      // Remove through the iterator to avoid ConcurrentModificationException.
      it.remove();
      if (LOG.isDebugEnabled()) {
        LOG.debug("Garbage collected SimulatorTIP, taskId=" + attemptId);
      }
    }
  }
}
Frees up bookkeeping memory used by completed tasks. Has no effect on the events or logs produced by the SimulatorTaskTracker. We need this in order not to report completed tasks multiple times and to ensure that we do not run out of Java heap memory in larger simulations.
33,760
/**
 * Transmits a heartbeat to the job tracker and processes the response.
 * Progresses all task statuses to now, reports cloned statuses, asks for a
 * new task whenever any map or reduce slot is free, garbage collects
 * completed attempts, and schedules the next heartbeat at the interval the
 * response requests.
 *
 * @param event the heartbeat timer event
 * @return events derived from the response, plus the next heartbeat event
 * @throws IllegalStateException if the heartbeat RPC fails
 */
private List<SimulatorEvent> processHeartbeatEvent(HeartbeatEvent event) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Processing heartbeat event " + event);
  }
  long now = event.getTimeStamp();
  progressTaskStatuses(now);
  List<TaskStatus> taskStatuses = collectAndCloneTaskStatuses();
  boolean askForNewTask =
      (usedMapSlots < maxMapSlots || usedReduceSlots < maxReduceSlots);
  TaskTrackerStatus taskTrackerStatus = new SimulatorTaskTrackerStatus(
      taskTrackerName, hostName, httpPort, taskStatuses, 0, maxMapSlots,
      maxReduceSlots, now);
  // Safe to collect now: the statuses above were cloned before removal.
  garbageCollectCompletedTasks();
  HeartbeatResponse response = null;
  try {
    response = jobTracker.heartbeat(taskTrackerStatus, false, firstHeartbeat,
        askForNewTask, heartbeatResponseId);
  } catch (IOException ioe) {
    // In the simulator the job tracker is in-process, so an IOException
    // here is an internal error, not a transient network failure.
    throw new IllegalStateException("Internal error", ioe);
  }
  firstHeartbeat = false;
  heartbeatResponseId = response.getResponseId();
  List<SimulatorEvent> events = handleHeartbeatResponse(response, now);
  // Schedule the next heartbeat per the job tracker's requested interval.
  events.add(new HeartbeatEvent(this, now + response.getHeartbeatInterval()));
  return events;
}
Transmits a heartbeat event to the jobtracker and processes the response .
33,761
/**
 * Retrieves the stripe containing the given lost block from the stripe
 * store, if one is configured. A failed or empty lookup is recorded in the
 * RAID reconstruction metrics with the online/offline log type.
 *
 * @param lostBlock the block whose stripe is needed
 * @param p source path, for metrics logging
 * @param lostBlockOffset offset of the lost block, for metrics logging
 * @param online whether this is an online (vs offline) reconstruction
 * @return the stripe info, or null if no store is configured or the
 *         lookup failed
 */
public StripeInfo retrieveStripe(Block lostBlock, Path p,
    long lostBlockOffset, FileSystem fs, Context context, boolean online)
    throws IOException {
  StripeInfo si = null;
  if (stripeStore != null) {
    IOException caughtException = null;
    try {
      si = stripeStore.getStripe(codec, lostBlock);
    } catch (IOException ioe) {
      LOG.error(" Fail to get stripe " + codec + " : " + lostBlock, ioe);
      caughtException = ioe;
    }
    if (si == null) {
      // Log the failure (with the caught exception, if any) to metrics.
      LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec, p,
          lostBlockOffset,
          online ? LOGTYPES.ONLINE_RECONSTRUCTION_GET_STRIPE
                 : LOGTYPES.OFFLINE_RECONSTRUCTION_GET_STRIPE,
          fs, caughtException, context);
    }
  }
  return si;
}
Retrieve stripes from stripe store
33,762
/**
 * Retrieves the stored checksum for a lost block from the checksum store,
 * if one is configured. A failed or empty lookup is recorded in the RAID
 * reconstruction metrics.
 *
 * @param lostBlock the block whose checksum is needed
 * @param p source path, for metrics logging
 * @param lostBlockOffset offset of the lost block, for metrics logging
 * @return the stored CRC, or null if no store is configured or the lookup
 *         failed
 */
public Long retrieveChecksum(Block lostBlock, Path p, long lostBlockOffset,
    FileSystem fs, Context context) throws IOException {
  Long oldCRC = null;
  if (checksumStore != null) {
    IOException caughtException = null;
    try {
      oldCRC = checksumStore.getChecksum(lostBlock);
    } catch (IOException ioe) {
      LOG.error(" Fail to get checksum for block " + lostBlock, ioe);
      caughtException = ioe;
    }
    if (oldCRC == null) {
      // Log the failure (with the caught exception, if any) to metrics.
      LogUtils.logRaidReconstructionMetrics(LOGRESULTS.FAILURE, 0, codec, p,
          lostBlockOffset, LOGTYPES.OFFLINE_RECONSTRUCTION_GET_CHECKSUM, fs,
          caughtException, context);
    }
  }
  return oldCRC;
}
Retrieve checksums from checksum store and record checksum lost if possible
33,763
/**
 * Recovers a corrupt block into a local file using the stripe information
 * from the stripe store. A CRC32 of the recovered data is computed only
 * when a checksum store is configured.
 *
 * @param localBlockFile destination file for the recovered block data
 * @param limit maximum number of bytes to recover
 * @param si stripe information describing the block's stripe
 * @return the CRC32 of the recovered data, or null when no checksum store
 *         is configured
 * @throws IOException if reconstruction or local I/O fails
 */
public CRC32 recoverBlockToFileFromStripeInfo(FileSystem srcFs, Path srcPath,
    Block lostBlock, File localBlockFile, long blockSize,
    long lostBlockOffset, long limit, StripeInfo si, Context context)
    throws IOException {
  OutputStream out = null;
  try {
    out = new FileOutputStream(localBlockFile);
    CRC32 crc = null;
    if (checksumStore != null) {
      crc = new CRC32();
    }
    fixErasedBlockImpl(srcFs, srcPath, srcFs, null, true, blockSize,
        lostBlockOffset, limit, false, out, context, crc, si, true,
        lostBlock);
    return crc;
  } finally {
    // Always close the local file, even when reconstruction fails.
    if (null != out) {
      out.close();
    }
  }
}
Recover a corrupt block to local file . Using the stripe information stored in the Stripe Store .
33,764
/**
 * Determines the old codec id to use when constructing an old-style
 * decoder. The current codec's id is used directly when it is already
 * "xor" or "rs"; otherwise the first of those codecs that has an existing
 * parity file for the source is chosen.
 *
 * @param srcStat status of the source file
 * @return "xor", "rs", or null when no old parity file exists
 * @throws IOException if the parity lookup fails
 */
private String getOldCodeId(FileStatus srcStat) throws IOException {
  String currentId = codec.id;
  if ("xor".equals(currentId) || "rs".equals(currentId)) {
    return currentId;
  }
  // Probe the legacy codecs in priority order.
  for (String candidate : new String[] { "xor", "rs" }) {
    if (ParityFilePair.getParityFile(Codec.getCodec(candidate), srcStat,
        this.conf) != null) {
      return candidate;
    }
  }
  return null;
}
Return the old codec id used to construct an old decoder
33,765
/**
 * Sets up the JVM-wide SSL system properties from the client SSL
 * configuration resource (dfs.https.client.keystore.resource, defaulting
 * to ssl-client.xml): trust store and key store location, password and
 * type, plus the key password.
 */
private static void setupSsl(Configuration conf) {
  Configuration sslConf = new Configuration(false);
  sslConf.addResource(
      conf.get("dfs.https.client.keystore.resource", "ssl-client.xml"));
  // {system property, ssl-client.xml key, default} triples, applied in order.
  String[][] propertyMap = {
      { "javax.net.ssl.trustStore", "ssl.client.truststore.location", "" },
      { "javax.net.ssl.trustStorePassword", "ssl.client.truststore.password", "" },
      { "javax.net.ssl.trustStoreType", "ssl.client.truststore.type", "jks" },
      { "javax.net.ssl.keyStore", "ssl.client.keystore.location", "" },
      { "javax.net.ssl.keyStorePassword", "ssl.client.keystore.password", "" },
      { "javax.net.ssl.keyPassword", "ssl.client.keystore.keypassword", "" },
      { "javax.net.ssl.keyStoreType", "ssl.client.keystore.type", "jks" },
  };
  for (String[] entry : propertyMap) {
    System.setProperty(entry[0], sslConf.get(entry[1], entry[2]));
  }
}
Set up SSL resources
33,766
/**
 * Sets up the task controller component; called prior to use.
 * Confirmed-kill behavior (waiting for and retrying kills) is only read
 * from configuration when the platform supports task liveness checks;
 * otherwise the fields keep their defaults.
 */
void setup() {
  if (supportsIsTaskAlive()) {
    waitForConfirmedKill = getConf().getBoolean(WAIT_FOR_CONFIRMED_KILL_KEY,
        WAIT_FOR_CONFIRMED_DEFAULT);
    confirmedKillRetries = getConf().getInt(CONFIRMED_KILL_RETRIES_KEY,
        CONFIRMED_KILL_RETRIES_DEFAULT);
  }
  LOG.info("setup: waitForConfirmedKill=" + waitForConfirmedKill
      + ", confirmedKillRetries=" + confirmedKillRetries);
}
Setup task controller component . Will be called prior to use .
33,767
/**
 * Kills the task JVM asynchronously via a DestroyJVMTaskRunnable thread.
 * When waitForConfirmedKill is set, blocks until the destroyer thread has
 * finished.
 *
 * @param context the task controller context identifying the JVM to kill
 * @throws IllegalStateException if interrupted while waiting for the
 *         destroyer thread; the interrupt status is restored first
 */
final void destroyTaskJVM(TaskControllerContext context) {
  Thread taskJVMDestroyer = new Thread(new DestroyJVMTaskRunnable(context));
  taskJVMDestroyer.start();
  if (waitForConfirmedKill) {
    try {
      taskJVMDestroyer.join();
    } catch (InterruptedException e) {
      // BUG FIX: restore the interrupt status instead of swallowing it, so
      // callers up the stack can still observe the interruption.
      Thread.currentThread().interrupt();
      // BUG FIX: chain the cause so the stack trace is not lost.
      throw new IllegalStateException("destroyTaskJVM: Failed to join "
          + taskJVMDestroyer.getName(), e);
    }
  }
}
Use DestroyJVMTaskRunnable to kill task JVM asynchronously . Wait for the confirmed kill if configured so .
33,768
/**
 * Returns the penultimate (second-to-last) allocated block of this file,
 * or null when fewer than two blocks exist.
 */
public Block getPenultimateBlock() {
  // A penultimate block only exists once at least two blocks are allocated.
  if (blocks != null && blocks.length >= 2) {
    return blocks[blocks.length - 2];
  }
  return null;
}
Return the penultimate allocated block for this file .
33,769
/**
 * Appends a block to the end of this file's block list, growing the
 * backing array by one.
 */
public void addBlock(BlockInfo newblock) {
  if (this.blocks == null) {
    // First block for this file.
    this.blocks = new BlockInfo[] { newblock };
  } else {
    // Copy into an array one element larger and append at the end.
    int oldSize = this.blocks.length;
    BlockInfo[] grown = new BlockInfo[oldSize + 1];
    System.arraycopy(this.blocks, 0, grown, 0, oldSize);
    grown[oldSize] = newblock;
    this.blocks = grown;
  }
}
add a block to the block list
33,770
/**
 * Builds an INodeRaidStorage by combining the parity file's blocks with
 * this file's blocks through the codec. Only used by merge.
 *
 * @throws IOException if codec is null
 */
public INodeRaidStorage convertToRaidStorage(BlockInfo[] parityBlocks,
    RaidCodec codec, int[] checksums, BlocksMap blocksMap,
    short replication, INodeFile inode) throws IOException {
  // Guard clause instead of if/else nesting.
  if (codec == null) {
    throw new IOException("Codec is null");
  }
  return new INodeRaidStorage(
      codec.convertToRaidStorage(parityBlocks, blocks, checksums, blocksMap,
          replication, inode),
      codec);
}
Only used by merge: it puts the parity file's blocks and the source file's blocks together into one block array to create an INodeRaidStorage.
33,771
/**
 * Writes the given data through the block compression codec, ensuring no
 * single compressed block receives more than MAX_INPUT_SIZE uncompressed
 * bytes (the buffer size less the compression overhead fixed at
 * construction).
 *
 * @throws IOException if the stream has already been finished
 * @throws NullPointerException if b is null
 * @throws IndexOutOfBoundsException if off/len fall outside b
 */
public void write(byte[] b, int off, int len) throws IOException {
  if (compressor.finished()) {
    throw new IOException("write beyond end of stream");
  }
  // Argument validation mirroring OutputStream.write(byte[], int, int).
  if (b == null) {
    throw new NullPointerException();
  } else if ((off < 0) || (off > b.length) || (len < 0)
      || ((off + len) > b.length)) {
    throw new IndexOutOfBoundsException();
  } else if (len == 0) {
    return;
  }
  // If this write would overflow the block in progress, finish and reset
  // the compressor so a fresh block is started.
  long limlen = compressor.getBytesRead();
  if (len + limlen > MAX_INPUT_SIZE && limlen > 0) {
    finish();
    compressor.reset();
  }
  // Oversized writes: emit the total uncompressed length once, then
  // compress the data in MAX_INPUT_SIZE chunks, one block per chunk.
  if (len > MAX_INPUT_SIZE) {
    rawWriteInt(len);
    do {
      int bufLen = Math.min(len, MAX_INPUT_SIZE);
      compressor.setInput(b, off, bufLen);
      compressor.finish();
      while (!compressor.finished()) {
        compress();
      }
      compressor.reset();
      off += bufLen;
      len -= bufLen;
    } while (len > 0);
    return;
  }
  // Normal path: feed the data in; when the compressor's buffer is full,
  // write the uncompressed length followed by the compressed data.
  compressor.setInput(b, off, len);
  if (!compressor.needsInput()) {
    rawWriteInt((int) compressor.getBytesRead());
    do {
      compress();
    } while (!compressor.needsInput());
  }
}
Write the data provided to the compression codec compressing no more than the buffer size less the compression overhead as specified during construction for each block .
33,772
/**
 * Ensures this replica's on-disk files are private to the block
 * (copy-on-write): any file with more hard links than numLinks is copied
 * and relinked. For non-inline checksums the separate meta file is
 * handled as well.
 *
 * @param numLinks the expected (allowed) hard-link count
 * @return false if the block was already detached; true otherwise (after
 *         performing any needed copies)
 * @throws IOException if the block's data or meta file cannot be found
 */
boolean detachBlock(int namespaceId, Block block, int numLinks)
    throws IOException {
  if (isDetached()) {
    return false;
  }
  if (blockDataFile.getFile() == null || blockDataFile.volume == null) {
    throw new IOException("detachBlock:Block not found. " + block);
  }
  File meta = null;
  if (!inlineChecksum) {
    // Checksums live in a separate meta file that must be detached too.
    meta = BlockWithChecksumFileWriter.getMetaFile(blockDataFile.getFile(),
        block);
    if (meta == null) {
      throw new IOException("Meta file not found for block " + block);
    }
  }
  if (HardLink.getLinkCount(blockDataFile.getFile()) > numLinks) {
    DataNode.LOG.info("CopyOnWrite for block " + block);
    detachFile(namespaceId, blockDataFile.getFile(), block);
  }
  if (!inlineChecksum) {
    if (HardLink.getLinkCount(meta) > numLinks) {
      detachFile(namespaceId, meta, block);
    }
  }
  setDetached();
  return true;
}
Returns true if this block was copied otherwise returns false .
33,773
/**
 * Adds a job to this job's dependency list. Dependencies may only be added
 * while this job is still WAITING to run, not during or after execution.
 *
 * @param dependingJob the prerequisite job
 * @return true if the dependency was recorded, false if this job is no
 *         longer waiting
 */
public synchronized boolean addDependingJob(Job dependingJob) {
  // Guard clause: dependencies are frozen once the job leaves WAITING.
  if (this.state != Job.WAITING) {
    return false;
  }
  if (this.dependingJobs == null) {
    this.dependingJobs = new ArrayList<Job>();
  }
  return this.dependingJobs.add(dependingJob);
}
Add a job to this job's dependency list. Depending jobs can only be added while a Job is waiting to run, not during or afterwards.
33,774
/**
 * Checks the state of the submitted mapred job. If the job has completed,
 * the state becomes SUCCESS or FAILED; on failure (or on an RPC error
 * while polling) the job is killed and the job client closed on a
 * best-effort basis — secondary IOExceptions from those cleanup calls are
 * deliberately ignored.
 */
private void checkRunningState() {
  RunningJob running = null;
  try {
    running = jc.getJob(this.mapredJobID);
    if (running.isComplete()) {
      if (running.isSuccessful()) {
        this.state = Job.SUCCESS;
      } else {
        this.state = Job.FAILED;
        this.message = "Job failed!";
        try {
          running.killJob();
        } catch (IOException e1) {
          // best-effort kill; ignore
        }
        try {
          this.jc.close();
        } catch (IOException e2) {
          // best-effort close; ignore
        }
      }
    }
  } catch (IOException ioe) {
    // Could not query the job: record the failure and clean up.
    this.state = Job.FAILED;
    this.message = StringUtils.stringifyException(ioe);
    try {
      if (running != null)
        running.killJob();
    } catch (IOException e1) {
      // best-effort kill; ignore
    }
    try {
      this.jc.close();
    } catch (IOException e1) {
      // best-effort close; ignore
    }
  }
}
Check the state of this running job . The state may remain the same become SUCCESS or FAILED .
33,775
/**
 * Checks and updates this job's state from its own running state and its
 * dependencies:
 * - a RUNNING job is refreshed via checkRunningState();
 * - a non-WAITING job's state is returned unchanged;
 * - a WAITING job with no dependencies becomes READY;
 * - if any dependency is still WAITING/READY/RUNNING, stay WAITING;
 * - if any dependency failed (directly or transitively), become
 *   DEPENDENT_FAILED with an explanatory message;
 * - only when every dependency succeeded does the job become READY.
 *
 * @return the (possibly updated) state
 */
synchronized int checkState() {
  if (this.state == Job.RUNNING) {
    checkRunningState();
  }
  if (this.state != Job.WAITING) {
    return this.state;
  }
  if (this.dependingJobs == null || this.dependingJobs.size() == 0) {
    this.state = Job.READY;
    return this.state;
  }
  Job pred = null;
  int n = this.dependingJobs.size();
  for (int i = 0; i < n; i++) {
    pred = this.dependingJobs.get(i);
    // Recursively refresh the dependency's state before inspecting it.
    int s = pred.checkState();
    if (s == Job.WAITING || s == Job.READY || s == Job.RUNNING) {
      // A dependency is unfinished; this job must keep waiting.
      break;
    }
    if (s == Job.FAILED || s == Job.DEPENDENT_FAILED) {
      this.state = Job.DEPENDENT_FAILED;
      this.message = "depending job " + i + " with jobID " + pred.getJobID()
          + " failed. " + pred.getMessage();
      break;
    }
    // All dependencies examined and all succeeded.
    if (i == n - 1) {
      this.state = Job.READY;
    }
  }
  return this.state;
}
Check and update the state of this job . The state changes depending on its current state and the states of the depending jobs .
33,776
/**
 * Submits this job to mapred. The state becomes RUNNING if submission
 * succeeds, FAILED otherwise. When create.empty.dir.if.nonexist is set,
 * missing input directories are created first; mkdirs failures are
 * deliberately ignored (submission itself will surface any real problem).
 */
protected synchronized void submit() {
  try {
    if (theJobConf.getBoolean("create.empty.dir.if.nonexist", false)) {
      FileSystem fs = FileSystem.get(theJobConf);
      Path inputPaths[] = FileInputFormat.getInputPaths(theJobConf);
      for (int i = 0; i < inputPaths.length; i++) {
        if (!fs.exists(inputPaths[i])) {
          try {
            fs.mkdirs(inputPaths[i]);
          } catch (IOException e) {
            // best-effort directory creation; ignore
          }
        }
      }
    }
    RunningJob running = jc.submitJob(theJobConf);
    this.mapredJobID = running.getID();
    this.state = Job.RUNNING;
  } catch (IOException ioe) {
    this.state = Job.FAILED;
    this.message = StringUtils.stringifyException(ioe);
  }
}
Submit this job to mapred . The state becomes RUNNING if submission is successful FAILED otherwise .
33,777
/**
 * Buffers the given bytes so that the whole write lands inside a single
 * data segment, then lets flushIfNeeded() decide whether the current
 * segment should be written out.
 */
public void write(byte[] b, int start, int length) throws IOException {
  currentDataSegmentBuffer.write(b, start, length);
  flushIfNeeded();
}
This function makes sure the whole buffer is written into the same data segment .
33,778
/**
 * Writes out the buffered data segment, if non-empty: wraps the buffer in
 * a DataSegmentWriter (which applies the configured codec/compressor),
 * records the raw and stored sizes in the metadata, writes the segment to
 * the underlying stream, resets the buffer, and flushes the stream.
 */
public void flush() throws IOException {
  if (currentDataSegmentBuffer.size() == 0) {
    return;
  }
  DataSegmentWriter currentDataSegment = new DataSegmentWriter(
      currentDataSegmentBuffer, codec, codecCompressor);
  updateMetadata(currentDataSegmentBuffer.size(), currentDataSegment.size());
  currentDataSegment.writeTo(dataSegmentDataOut);
  currentDataSegmentBuffer.reset();
  dataSegmentDataOut.flush();
}
Take the current data segment optionally compress it calculate the crc32 and then write it out .
33,779
/**
 * Permits paths that explicitly specify the NameNode's default port when
 * this filesystem's own URI carries no port: such a path is accepted when
 * scheme and host match; everything else is delegated to the superclass
 * check.
 */
protected void checkPath(Path path) {
  URI ourUri = this.getUri();
  URI pathUri = path.toUri();
  String pathAuthority = pathUri.getAuthority();
  boolean sameScheme = pathUri.getScheme() != null
      && pathUri.getScheme().equalsIgnoreCase(ourUri.getScheme());
  // Only the "explicit default port vs. no port" combination is special.
  if (sameScheme && pathUri.getPort() == NameNode.DEFAULT_PORT
      && ourUri.getPort() == -1) {
    // Authority is "host:port" here (port is present), so ':' exists.
    String pathHost = pathAuthority.substring(0, pathAuthority.indexOf(":"));
    if (pathHost.equalsIgnoreCase(ourUri.getAuthority())) {
      return;
    }
  }
  super.checkPath(path);
}
Permit paths which explicitly specify the default port .
33,780
/**
 * Appends to an existing file, returning an output stream whose reported
 * position starts at the stream's initial length.
 */
public FSDataOutputStream append(Path f, int bufferSize,
    Progressable progress) throws IOException {
  DFSOutputStream op =
      (DFSOutputStream) dfs.append(getPathName(f), bufferSize, progress);
  return new FSDataOutputStream(op, statistics, op.getInitialLen());
}
Append to an existing file (optional operation); the returned stream's position starts at the file's initial length.
33,781
/**
 * DFS-only operation (not part of FileSystem): moves the blocks of the
 * source files onto the target and deletes the sources afterwards.
 *
 * @param trg target path
 * @param psrcs source paths to merge into the target
 * @param restricted whether to apply the restricted form of concat
 *        (presumably stricter block-size checks in the NameNode — confirm
 *        against ClientProtocol.concat)
 */
public void concat(Path trg, Path[] psrcs, boolean restricted)
    throws IOException {
  String[] srcs = new String[psrcs.length];
  for (int i = 0; i < psrcs.length; i++) {
    srcs[i] = getPathName(psrcs[i]);
  }
  dfs.concat(getPathName(trg), srcs, restricted);
}
THIS IS a DFS-only operation; it is not part of FileSystem. Moves blocks from srcs to trg and deletes srcs afterwards.
33,782
/**
 * DFS-only operation (not part of FileSystem): moves the blocks of the
 * source files onto the target and deletes the sources afterwards.
 * Delegates to the restricted form; all blocks should be of the same size.
 */
public void concat(Path trg, Path[] psrcs) throws IOException {
  concat(trg, psrcs, true);
}
THIS IS a DFS-only operation; it is not part of FileSystem. Moves blocks from srcs to trg and deletes srcs afterwards; all blocks should be of the same size.
33,783
/**
 * Sets a directory's namespace and diskspace quotas.
 */
public void setQuota(Path src, long namespaceQuota, long diskspaceQuota)
    throws IOException {
  dfs.setQuota(getPathName(src), namespaceQuota, diskspaceQuota);
}
Set a directory s quotas
33,784
/**
 * Returns the stat information about the file, qualified against this
 * filesystem.
 *
 * @throws FileNotFoundException when the path does not exist
 */
public FileStatus getFileStatus(Path f) throws IOException {
  FileStatus info = dfs.getFileInfo(getPathName(f));
  // A null result from getFileInfo means the path does not exist.
  if (info == null) {
    throw new FileNotFoundException("File does not exist: " + f);
  }
  info.makeQualified(this);
  return info;
}
Returns the stat information about the file .
33,785
/**
 * Joins the per-tag value lists and collects the results. Entry point of
 * the recursive join; no-op when there are no value lists.
 */
private void joinAndCollect(Object[] tags, ResetableIterator[] values,
    Object key, OutputCollector output, Reporter reporter)
    throws IOException {
  if (values.length >= 1) {
    // Scratch array holding one value per position during the recursion.
    Object[] scratch = new Object[values.length];
    joinAndCollect(tags, values, 0, scratch, key, output, reporter);
  }
}
join the list of the value lists and collect the results .
33,786
/**
 * Performs the actual join recursively: position pos iterates over its
 * value list, filling partialList; once every position is filled, the
 * partial list is combined into one output record and collected. This
 * enumerates the full cross product of the value lists.
 */
private void joinAndCollect(Object[] tags, ResetableIterator[] values,
    int pos, Object[] partialList, Object key, OutputCollector output,
    Reporter reporter) throws IOException {
  // Base case: all positions filled — combine and emit.
  if (values.length == pos) {
    TaggedMapOutput combined = combine(tags, partialList);
    collect(key, combined, output, reporter);
    return;
  }
  ResetableIterator nextValues = values[pos];
  // Reset so this list can be iterated again on each recursive pass.
  nextValues.reset();
  while (nextValues.hasNext()) {
    Object v = nextValues.next();
    partialList[pos] = v;
    joinAndCollect(tags, values, pos + 1, partialList, key, output, reporter);
  }
}
Perform the actual join recursively .
33,787
/**
 * Removes the given job from the currently running jobs map and notifies
 * listeners asynchronously on the default Display's UI thread.
 */
public void purgeJob(final HadoopJob job) {
  runningJobs.remove(job.getJobID());
  // Listener notification must happen on the UI thread.
  Display.getDefault().asyncExec(new Runnable() {
    public void run() {
      fireJobRemoved(job);
    }
  });
}
Remove the given job from the currently running jobs map
33,788
/**
 * Overwrites this location's configuration with the settings in the given
 * XML file. The existing configuration is preserved when the XML lacks the
 * expected configuration/property structure.
 *
 * @param file the Hadoop-style configuration XML file
 * @return true if the file was parsed and applied, false if the structure
 *         was invalid
 * @throws ParserConfigurationException if no XML parser can be created
 * @throws SAXException if the file is not well-formed XML
 * @throws IOException if the file cannot be read
 */
public boolean loadFromXML(File file)
    throws ParserConfigurationException, SAXException, IOException {
  // Work on a copy; the live configuration is replaced only on success.
  Configuration newConf = new Configuration(this.conf);
  DocumentBuilder builder =
      DocumentBuilderFactory.newInstance().newDocumentBuilder();
  Document document = builder.parse(file);
  Element root = document.getDocumentElement();
  if (!"configuration".equals(root.getTagName()))
    return false;
  NodeList props = root.getChildNodes();
  for (int i = 0; i < props.getLength(); i++) {
    Node propNode = props.item(i);
    if (!(propNode instanceof Element))
      continue;
    Element prop = (Element) propNode;
    if (!"property".equals(prop.getTagName()))
      return false;
    NodeList fields = prop.getChildNodes();
    String attr = null;
    String value = null;
    for (int j = 0; j < fields.getLength(); j++) {
      Node fieldNode = fields.item(j);
      if (!(fieldNode instanceof Element))
        continue;
      Element field = (Element) fieldNode;
      // BUG FIX: guard against an empty <name/> element, where
      // getFirstChild() is null and the cast would throw a
      // NullPointerException; mirrors the existing <value> guard.
      if ("name".equals(field.getTagName()) && field.hasChildNodes())
        attr = ((Text) field.getFirstChild()).getData();
      if ("value".equals(field.getTagName()) && field.hasChildNodes())
        value = ((Text) field.getFirstChild()).getData();
    }
    if (attr != null && value != null)
      newConf.set(attr, value);
  }
  this.conf = newConf;
  return true;
}
Overwrite this location with settings available in the given XML file . The existing configuration is preserved if the XML file is invalid .
33,789
/**
 * Writes this location's settings, as XML, to the given file.
 *
 * @param file destination file
 * @throws IOException if the file cannot be created or written
 */
public void storeSettingsToFile(File file) throws IOException {
  FileOutputStream fos = new FileOutputStream(file);
  // BUG FIX: close the stream even when writeXml throws, so the file
  // descriptor is never leaked.
  try {
    this.conf.writeXml(fos);
  } finally {
    fos.close();
  }
}
Write this location settings to the given output stream
33,790
/**
 * Fills the configuration with each ConfProp's default value, touching
 * only properties that are not yet set.
 */
private void addPluginConfigDefaultProperties() {
  for (ConfProp prop : ConfProp.values()) {
    boolean unset = (conf.get(prop.name) == null);
    if (unset) {
      conf.set(prop.name, prop.defVal);
    }
  }
}
Fill the configuration with valid default values
33,791
/**
 * Downgrades a new-API JobID to an old-API one. Returns the argument
 * unchanged when it is already of the old type; otherwise constructs an
 * equivalent old JobID from its identifier and id.
 */
public static JobID downgrade(org.apache.hadoop.mapreduce.JobID old) {
  return (old instanceof JobID)
      ? (JobID) old
      : new JobID(old.getJtIdentifier(), old.getId());
}
Downgrade a new JobID to an old one
33,792
/**
 * Records a method call and its return value as one comma-separated line
 * in the API trace log: pid, event id, entry time (relative to baseTime),
 * elapsed time, call index, stream id, return value, then any argument
 * values. All values pass through escape(). No-op unless INFO logging is
 * enabled for the trace logger.
 *
 * @param entryTime absolute time the call was entered
 * @param returnTime absolute time the call returned
 * @param callIndex index identifying which API method was called
 * @param returnValue the call's return value (may be null)
 * @param argValues the call's argument values (may be null)
 * @param streamId id of the stream the call operated on
 */
public static void logCall(long entryTime, long returnTime, int callIndex,
    Object returnValue, Object argValues[], long streamId) {
  if (!API_TRACE_LOG.isInfoEnabled()) {
    return;
  }
  long elapsed = returnTime - entryTime;
  // Report the entry time relative to process start.
  entryTime -= baseTime;
  StringBuilder line = new StringBuilder();
  // IMPROVEMENT: chained appends instead of string concatenation inside
  // append(), which created a throwaway String per field.
  line.append(pid).append(',')
      .append(nextEventId.getAndIncrement()).append(',')
      .append(entryTime).append(',')
      .append(elapsed).append(',')
      .append(callIndex).append(',')
      .append(streamId).append(',')
      .append(escape(returnValue));
  if (argValues != null) {
    for (int i = 0; i < argValues.length; i++) {
      line.append(',').append(escape(argValues[i]));
    }
  }
  API_TRACE_LOG.info(line);
}
Record a method call and its return value in the log .
33,793
/**
 * Calculates the mean, (population) variance and standard deviation of
 * the given numbers. All three are 0 for an empty list.
 */
private static Stats calcStats(List<Double> nums) {
  int count = nums.size();
  double total = 0.0;
  for (double value : nums) {
    total += value;
  }
  double mean = (count > 0) ? total / count : 0.0;
  // Second pass: sum of squared deviations from the mean.
  double squaredDiffs = 0.0;
  for (double value : nums) {
    double delta = value - mean;
    squaredDiffs += delta * delta;
  }
  double variance = (count > 0) ? squaredDiffs / count : 0.0;
  return new Stats(mean, variance, Math.sqrt(variance));
}
Calculates mean, variance, and standard deviation for a set of numbers
33,794
/**
 * Serializes this instance as a JSON object with poolGroupName and
 * poolName fields; used when persisting the ClusterManager state to disk.
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();
  jsonGenerator.writeStringField("poolGroupName", poolGroupName);
  jsonGenerator.writeStringField("poolName", poolName);
  jsonGenerator.writeEndObject();
}
Used to write the state of the PoolInfo instance to disk when we are persisting the state of the ClusterManager
33,795
/**
 * Converts a PoolInfo to its Thrift PoolInfoStrings form; null maps to
 * null.
 */
public static PoolInfoStrings createPoolInfoStrings(PoolInfo poolInfo) {
  return (poolInfo == null)
      ? null
      : new PoolInfoStrings(poolInfo.getPoolGroupName(),
          poolInfo.getPoolName());
}
Convert this object to PoolInfoStrings for Thrift
33,796
/**
 * Converts a Thrift PoolInfoStrings back to a PoolInfo; null maps to
 * null.
 */
public static PoolInfo createPoolInfo(PoolInfoStrings poolInfoStrings) {
  return (poolInfoStrings == null)
      ? null
      : new PoolInfo(poolInfoStrings.getPoolGroupName(),
          poolInfoStrings.getPoolName());
}
Convert this object from PoolInfoStrings for Thrift
33,797
/**
 * Returns whether the given PoolInfo is legal: both the pool group name
 * and the pool name must be non-null, non-empty, and must not match the
 * invalid-characters pattern.
 */
public static boolean isLegalPoolInfo(PoolInfo poolInfo) {
  if (poolInfo == null) {
    return false;
  }
  String groupName = poolInfo.getPoolGroupName();
  String poolName = poolInfo.getPoolName();
  if (groupName == null || poolName == null) {
    return false;
  }
  return !groupName.isEmpty()
      && !poolName.isEmpty()
      && !INVALID_REGEX_PATTERN.matcher(groupName).matches()
      && !INVALID_REGEX_PATTERN.matcher(poolName).matches();
}
Returns whether or not the given PoolInfo (its pool group name and pool name) is legal.
33,798
/**
 * Applies a umask to this permission and returns the resulting new one:
 * each of the user, group and other actions keeps only the bits NOT set
 * in the corresponding umask action (action AND NOT umask).
 */
public FsPermission applyUMask(FsPermission umask) {
  return new FsPermission(useraction.and(umask.useraction.not()),
      groupaction.and(umask.groupaction.not()),
      otheraction.and(umask.otheraction.not()));
}
Apply a umask to this permission and return a new one
33,799
/**
 * Creates an FsPermission from a 10-character Unix symbolic permission
 * string such as "drwxr-xr-x". The first character (file type) is
 * ignored; each of the remaining nine contributes one mode bit, most
 * significant first, where '-', 'T' and 'S' mean the bit is clear.
 *
 * @param unixSymbolicPermission the symbolic string, or null
 * @return the parsed permission, or null when the input is null
 * @throws IllegalArgumentException if the string is not 10 characters
 */
public static FsPermission valueOf(String unixSymbolicPermission) {
  if (unixSymbolicPermission == null) {
    return null;
  }
  if (unixSymbolicPermission.length() != 10) {
    throw new IllegalArgumentException(
        "length != 10(unixSymbolicPermission=" + unixSymbolicPermission
        + ")");
  }
  short mode = 0;
  for (int i = 1; i < unixSymbolicPermission.length(); i++) {
    mode <<= 1;
    char c = unixSymbolicPermission.charAt(i);
    if (c != '-' && c != 'T' && c != 'S') {
      mode += 1;
    }
  }
  return new FsPermission(mode);
}
Create a FsPermission from a Unix symbolic permission string