idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,800
/**
 * Checks whether a named output channel is (or is not) already defined in the
 * job configuration, throwing if the actual state does not match expectation.
 *
 * @param conf           job configuration holding the defined named outputs
 * @param alreadyDefined if {@code true}, fail when the output IS already
 *                       defined; if {@code false}, fail when it is NOT defined
 * @throws IllegalArgumentException if the expectation is violated
 */
private static void checkNamedOutput(JobConf conf, String namedOutput,
                                     boolean alreadyDefined) {
  List<String> definedChannels = getNamedOutputsList(conf);
  if (alreadyDefined && definedChannels.contains(namedOutput)) {
    // fixed garbled message: was "... already alreadyDefined"
    throw new IllegalArgumentException(
        "Named output '" + namedOutput + "' already defined");
  } else if (!alreadyDefined && !definedChannels.contains(namedOutput)) {
    throw new IllegalArgumentException(
        "Named output '" + namedOutput + "' not defined");
  }
}
Checks if a named output is already defined or not .
32,801
/**
 * Checks that a named-output name is a valid token: non-null, non-empty, and
 * made only of ASCII letters and digits.
 *
 * @throws IllegalArgumentException if the name is null, empty, or contains a
 *         character outside [A-Za-z0-9]
 */
private static void checkTokenName(String namedOutput) {
  if (namedOutput == null || namedOutput.length() == 0) {
    // fixed typo: was "emtpy"
    throw new IllegalArgumentException("Name cannot be NULL or empty");
  }
  for (char ch : namedOutput.toCharArray()) {
    if ((ch >= 'A') && (ch <= 'Z')) {
      continue;
    }
    if ((ch >= 'a') && (ch <= 'z')) {
      continue;
    }
    if ((ch >= '0') && (ch <= '9')) {
      continue;
    }
    // fixed grammar: was "Name cannot be have a"
    throw new IllegalArgumentException(
        "Name cannot have a '" + ch + "' char");
  }
}
Checks if a named output name is a valid token .
32,802
// Returns whether the given named output is configured as a multiple output
// (the MO_PREFIX + name + MULTI boolean property, default false). Validates
// first that the named output has been defined; throws otherwise.
public static boolean isMultiNamedOutput ( JobConf conf , String namedOutput ) { checkNamedOutput ( conf , namedOutput , false ) ; return conf . getBoolean ( MO_PREFIX + namedOutput + MULTI , false ) ; }
Returns if a named output is multiple .
32,803
/**
 * Returns whether the host is part of the cluster per configuration: a host is
 * allowed when it is included (an empty includes set admits every host) and
 * not explicitly excluded.
 */
public synchronized boolean isAllowedHost(String host) {
  // exclusion always wins
  if (excludes.contains(host)) {
    return false;
  }
  return includes.isEmpty() || includes.contains(host);
}
Checks if a host is part of the cluster as per configuration . For this the host must be included and not excluded . An empty includes files means the host is included .
32,804
// Reads the binary representation of the counter: an interned name string, a
// boolean flag telling whether a distinct display name follows (otherwise the
// display name aliases the counter name), then the vlong value. Must mirror
// the field order written by write().
public synchronized void readFields ( DataInput in ) throws IOException { name = CounterNames . intern ( Text . readString ( in ) ) ; if ( in . readBoolean ( ) ) { displayName = CounterNames . intern ( Text . readString ( in ) ) ; } else { displayName = name ; } value = WritableUtils . readVLong ( in ) ; }
Read the binary representation of the counter
32,805
// Writes the binary representation of the counter: name, a boolean marking
// whether the display name differs from the name, the display name only when
// distinct, then the vlong value. Field order must match readFields().
public synchronized void write ( DataOutput out ) throws IOException { Text . writeString ( out , name ) ; boolean distinctDisplayName = ! name . equals ( displayName ) ; out . writeBoolean ( distinctDisplayName ) ; if ( distinctDisplayName ) { Text . writeString ( out , displayName ) ; } WritableUtils . writeVLong ( out , value ) ; }
Write the binary representation of the counter
32,806
// Gathers files into the supplied collection by delegating to
// root.selectFiles, holding the update read lock so the tree is stable during
// selection; per the caller-facing contract the files total at least minSize.
// Returns whatever selectFiles returns (presumably the selected byte total —
// TODO confirm against FileTree.selectFiles).
public long getInputFiles ( long minSize , Collection < FileStatus > files ) throws IOException { updateLock . readLock ( ) . lock ( ) ; try { return root . selectFiles ( minSize , files ) ; } finally { updateLock . readLock ( ) . unlock ( ) ; } }
Gather a collection of files at least as large as minSize .
32,807
// Thin wrapper over FileSystem.getFileBlockLocations for the given file and
// byte range [start, start+len).
public BlockLocation [ ] locationsFor ( FileStatus stat , long start , long len ) throws IOException { return fs . getFileBlockLocations ( stat , start , len ) ; }
Get a set of locations for the given file .
32,808
/**
 * Unpacks a jar file into a directory, extracting every non-directory entry
 * and creating parent directories as needed.
 *
 * NOTE(review): entry names are used verbatim — an entry containing "../"
 * could escape {@code toDir} (zip-slip); confirm inputs are trusted.
 *
 * @param jarFile the jar to unpack
 * @param toDir   destination directory
 * @throws IOException if a directory cannot be created or a copy fails
 */
public static void unJar(File jarFile, File toDir) throws IOException {
  JarFile jar = new JarFile(jarFile);
  try {
    // JarFile.entries() is already Enumeration<JarEntry>; the original used a
    // raw Enumeration plus an unchecked cast.
    Enumeration<JarEntry> entries = jar.entries();
    while (entries.hasMoreElements()) {
      JarEntry entry = entries.nextElement();
      if (!entry.isDirectory()) {
        InputStream in = jar.getInputStream(entry);
        try {
          File file = new File(toDir, entry.getName());
          // mkdirs() returns false both on failure and when the directory
          // already exists, so only fail when it is truly absent.
          if (!file.getParentFile().mkdirs()) {
            if (!file.getParentFile().isDirectory()) {
              throw new IOException("Mkdirs failed to create "
                  + file.getParentFile().toString());
            }
          }
          OutputStream out = new FileOutputStream(file);
          try {
            byte[] buffer = new byte[8192];
            int i;
            while ((i = in.read(buffer)) != -1) {
              out.write(buffer, 0, i);
            }
          } finally {
            out.close();
          }
        } finally {
          in.close();
        }
      }
    }
  } finally {
    jar.close();
  }
}
Unpack a jar file into a directory .
32,809
// Registers QJournalProtocol.journal(JournalRequestInfo) with the fast
// protocol registry under SERIAL_VERSION_ID_1. Any reflection/registration
// failure is fatal and rethrown as an unchecked RuntimeException.
public static void init ( ) { try { FastProtocolRegister . register ( FastProtocolId . SERIAL_VERSION_ID_1 , QJournalProtocol . class . getMethod ( "journal" , JournalRequestInfo . class ) ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } }
register fast protocol
32,810
/**
 * Creates a hard link {@code linkName} pointing at {@code file} by running the
 * platform hard-link command and waiting for it to finish.
 *
 * @throws IOException if either argument is null, the command exits non-zero
 *         (the first line of its stderr/stdout becomes the message), or the
 *         wait is interrupted
 */
public static void createHardLink(File file, File linkName) throws IOException {
  if (file == null) {
    throw new IOException("invalid arguments to createHardLink: source file is null");
  }
  if (linkName == null) {
    throw new IOException("invalid arguments to createHardLink: link name is null");
  }
  String[] hardLinkCommand = getHardLinkCommand.linkOne(file, linkName);
  Process process = Runtime.getRuntime().exec(hardLinkCommand);
  try {
    if (process.waitFor() != 0) {
      // Fixed swapped variables: the original read stdout into errMsg and
      // stderr into inpMsg. getInputStream() is the process's stdout,
      // getErrorStream() its stderr.
      String inpMsg = new BufferedReader(
          new InputStreamReader(process.getInputStream())).readLine();
      if (inpMsg == null) inpMsg = "";
      String errMsg = new BufferedReader(
          new InputStreamReader(process.getErrorStream())).readLine();
      if (errMsg == null) errMsg = "";
      throw new IOException(errMsg + inpMsg);
    }
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt(); // preserve interrupt status
    throw new IOException(e);
  } finally {
    process.destroy();
  }
}
Creates a hardlink
32,811
// Retrieves the number of hard links to the given file by running the
// platform link-count command and parsing its first output line. On Solaris
// the count is the second whitespace-separated field; elsewhere the whole
// line is the count. Throws FileNotFoundException when the file is absent and
// wraps parse/interrupt/exit failures via createIOException. The process is
// destroyed and both readers closed in the finally block.
public static int getLinkCount ( File fileName ) throws IOException { if ( fileName == null ) { throw new IOException ( "invalid argument to getLinkCount: file name is null" ) ; } if ( ! fileName . exists ( ) ) { throw new FileNotFoundException ( fileName + " not found." ) ; } String [ ] cmd = getHardLinkCommand . linkCount ( fileName ) ; String inpMsg = null ; String errMsg = null ; int exitValue = - 1 ; BufferedReader in = null ; BufferedReader err = null ; Process process = Runtime . getRuntime ( ) . exec ( cmd ) ; try { exitValue = process . waitFor ( ) ; in = new BufferedReader ( new InputStreamReader ( process . getInputStream ( ) ) ) ; inpMsg = in . readLine ( ) ; err = new BufferedReader ( new InputStreamReader ( process . getErrorStream ( ) ) ) ; errMsg = err . readLine ( ) ; if ( inpMsg == null || exitValue != 0 ) { throw createIOException ( fileName , inpMsg , errMsg , exitValue , null ) ; } if ( osType == OSType . OS_TYPE_SOLARIS ) { String [ ] result = inpMsg . split ( "\\s+" ) ; return Integer . parseInt ( result [ 1 ] ) ; } else { return Integer . parseInt ( inpMsg ) ; } } catch ( NumberFormatException e ) { throw createIOException ( fileName , inpMsg , errMsg , exitValue , e ) ; } catch ( InterruptedException e ) { throw createIOException ( fileName , inpMsg , errMsg , exitValue , e ) ; } finally { process . destroy ( ) ; if ( in != null ) in . close ( ) ; if ( err != null ) err . close ( ) ; } }
Retrieves the number of links to the specified file .
32,812
// Builds a URI that redirects the request to the given datanode: same scheme
// as the incoming request, the datanode's hostname (preferring the
// DatanodeInfo hostname when available), and either the cluster-wide HTTPS
// port from the servlet context or the datanode's info port. The query
// carries the requested filename, the UGI, and the namenode address.
protected URI createRedirectUri ( String servletpath , UserGroupInformation ugi , DatanodeID host , HttpServletRequest request , NameNode nn ) throws URISyntaxException { final String hostname = host instanceof DatanodeInfo ? ( ( DatanodeInfo ) host ) . getHostName ( ) : host . getHost ( ) ; final String scheme = request . getScheme ( ) ; final int port = "https" . equals ( scheme ) ? ( Integer ) getServletContext ( ) . getAttribute ( "datanode.https.port" ) : host . getInfoPort ( ) ; final String nnAddr = NetUtils . toIpPort ( nn . getNameNodeAddress ( ) ) ; final String filename = request . getPathInfo ( ) ; return new URI ( scheme , null , hostname , port , servletpath , "filename=" + filename + "&ugi=" + ugi + JspHelper . getUrlParam ( JspHelper . NAMENODE_ADDRESS , nnAddr ) , null ) ; }
Create a URI for redirecting request to a datanode
32,813
/**
 * Extracts the "filename" parameter from the request.
 *
 * @throws IOException if the parameter is missing or empty
 */
protected String getFilename(HttpServletRequest request,
    HttpServletResponse response) throws IOException {
  String name = request.getParameter("filename");
  if (name != null && name.length() > 0) {
    return name;
  }
  throw new IOException("Invalid filename");
}
Get filename from the request
32,814
// Builds a URI for streaming a file from the first candidate datanode. The
// query string carries the UGI, every parameter from the original request,
// the remaining candidates (space-separated, starting from index 1) under
// "candidates", and the namenode address. Scheme/port selection mirrors
// createRedirectUri: the HTTPS port from the servlet context when the
// incoming scheme is https, otherwise the datanode info port.
protected URI createUri ( String file , DatanodeID [ ] candidates , UnixUserGroupInformation ugi , HttpServletRequest request ) throws URISyntaxException { String scheme = request . getScheme ( ) ; final DatanodeID host = candidates [ 0 ] ; final String hostname ; if ( host instanceof DatanodeInfo ) { hostname = ( ( DatanodeInfo ) host ) . getHostName ( ) ; } else { hostname = host . getHost ( ) ; } StringBuilder builder = new StringBuilder ( ) ; builder . append ( "ugi=" + ugi ) ; Enumeration < ? > it = request . getParameterNames ( ) ; while ( it . hasMoreElements ( ) ) { String key = it . nextElement ( ) . toString ( ) ; String value = request . getParameter ( key ) ; builder . append ( "&" + key + "=" + value ) ; } if ( candidates . length > 1 ) { builder . append ( "&candidates=" ) ; appendDatanodeID ( builder , candidates [ 1 ] ) ; for ( int j = 2 ; j < candidates . length ; j ++ ) { builder . append ( " " ) ; appendDatanodeID ( builder , candidates [ j ] ) ; } } NameNode nn = ( NameNode ) getServletContext ( ) . getAttribute ( "name.node" ) ; String addr = NetUtils . toIpPort ( nn . getNameNodeAddress ( ) ) ; builder . append ( JspHelper . getUrlParam ( JspHelper . NAMENODE_ADDRESS , addr ) ) ; return new URI ( scheme , null , hostname , "https" . equals ( scheme ) ? ( Integer ) getServletContext ( ) . getAttribute ( "datanode.https.port" ) : host . getInfoPort ( ) , "/streamFile" + file , builder . toString ( ) , null ) ; }
Create a URI for streaming a file
32,815
// Handles an RPC failure during failover. Rethrows immediately when the
// client is shutting down, the exception is not retriable, or the retry
// budget (FAILURE_RETRY) is exhausted. When not watching ZooKeeper it
// explicitly checks for failover: the read lock is released, the write lock
// taken for zkCheckFailover, then the locks are restored in the finally —
// the caller is expected to hold the read lock and release it on failure
// (see the surrounding try..finally contract). If no failover happened,
// sleeps one second before the caller retries; on interrupt the interrupt
// status is restored.
void handleFailure ( IOException ex , int failures ) throws IOException { if ( failoverClient . isShuttingdown ( ) || ! shouldHandleException ( ex ) ) { throw ex ; } if ( failures > FAILURE_RETRY ) { throw ex ; } try { if ( ! watchZK ) { LOG . debug ( "Not watching ZK, so checking explicitly" ) ; fsLock . readLock ( ) . unlock ( ) ; InjectionHandler . processEvent ( InjectionEvent . DAFS_CHECK_FAILOVER ) ; fsLock . writeLock ( ) . lock ( ) ; boolean failover = false ; try { failover = zkCheckFailover ( ex ) ; } finally { fsLock . writeLock ( ) . unlock ( ) ; fsLock . readLock ( ) . lock ( ) ; } if ( failover ) { return ; } } Thread . sleep ( 1000 ) ; } catch ( InterruptedException iex ) { LOG . error ( "Interrupted while waiting for a failover" , iex ) ; Thread . currentThread ( ) . interrupt ( ) ; } }
This function should be called within try .. finally which releases readlock if this fails .
32,816
/**
 * Tests whether {@code c} belongs to the accepted POSIX filename characters:
 * A-Z, a-z, 0-9, '.', '_' and '-'.
 */
public boolean isValidPosixFileChar(char c) {
  // return the condition directly instead of an if/else on a boolean
  return (c >= 'A' && c <= 'Z')
      || (c >= 'a' && c <= 'z')
      || (c >= '0' && c <= '9')
      || c == '.' || c == '_' || c == '-';
}
Test whether the character c belongs to the accepted list of posix filename characters A - Za - z0 - 9 . _ -
32,817
/**
 * Tests whether {@code name} is a valid POSIX filename: only characters
 * A-Za-z0-9._- are allowed and '-' must not be the first character.
 * An empty string is accepted (same as the original behavior).
 */
public boolean isValidPosixFileName(String name) {
  // leading '-' is rejected up front instead of inside the loop
  if (!name.isEmpty() && name.charAt(0) == '-') {
    return false;
  }
  for (int i = 0; i < name.length(); i++) {
    if (!isValidPosixFileChar(name.charAt(i))) {
      return false;
    }
  }
  return true;
}
Test whether filename is a valid posix filename A posix filename must contain characters A - Za - z0 - 9 . _ - and - must not be the first character
32,818
// Compares two string lists as atomic values for deep-equality checking:
// both null is equal; otherwise a single null or unequal lists raises
// DeepInequalityException at the path loc/eltname.
private void compareStrings ( List < String > c1 , List < String > c2 , TreePath loc , String eltname ) throws DeepInequalityException { if ( c1 == null && c2 == null ) { return ; } TreePath recursePath = new TreePath ( loc , eltname ) ; if ( c1 == null || c2 == null || ! c1 . equals ( c2 ) ) { throw new DeepInequalityException ( eltname + " miscompared" , recursePath ) ; } }
I'll treat this as an atomic object type
32,819
// Closes the current output segment and starts the next part once filesCount
// reaches a multiple of filesPerRoll. No-op when only a single part is
// configured or there are no files. Note: filesCount is incremented on every
// call before the roll check.
void rollIfNeeded ( ) throws IOException { if ( numberOfParts == 1 || numberOfFiles < 1 ) { return ; } filesCount ++ ; if ( filesCount % filesPerRoll == 0 ) { out . close ( ) ; currentPart ++ ; createOutputStream ( ) ; } }
Close current segment and start a new one if needed
32,820
/**
 * Adjusts the required locality level based on elapsed wait time: NODE is
 * promoted to RACK after {@code nodeWait}, RACK to ANY after {@code rackWait}.
 * No-op when the wait has not started or the level is already ANY.
 *
 * @param now      current time, compared against localityWaitStartTime
 * @param nodeWait max time to wait at NODE before relaxing to RACK
 * @param rackWait max time to wait at RACK before relaxing to ANY
 */
public void adjustLocalityRequirement(long now, long nodeWait, long rackWait) {
  if (!localityWaitStarted) {
    return;
  }
  if (localityRequired == LocalityLevel.ANY) {
    return;
  }
  if (localityRequired == LocalityLevel.NODE) {
    if (now - localityWaitStartTime > nodeWait) {
      setLocalityLevel(LocalityLevel.RACK);
    }
  } else if (localityRequired == LocalityLevel.RACK) {
    // else-if: the original fell through to this check after promoting
    // NODE->RACK and only avoided an immediate RACK->ANY promotion because
    // setLocalityLevel resets localityWaitStartTime to Long.MAX_VALUE.
    // Making the mutual exclusion explicit removes that hidden dependency.
    if (now - localityWaitStartTime > rackWait) {
      setLocalityLevel(LocalityLevel.ANY);
    }
  }
}
Adjust the locality requirement based on the current locality and the locality wait times . If the current required locality is node and enough time has passed - update it to rack . If the current is rack and enough time has passed - update to any
32,821
// Updates the required locality level, remembers it as the last locality, and
// resets wait tracking. The Long.MAX_VALUE sentinel makes any subsequent
// "now - localityWaitStartTime" comparison negative, so a fresh wait must be
// started before the level can relax again.
public void setLocalityLevel ( LocalityLevel level ) { localityRequired = level ; lastLocality = level ; localityWaitStarted = false ; localityWaitStartTime = Long . MAX_VALUE ; }
Update the required locality level for the session
32,822
// Deserializes this UGI: first verifies the UGI_TECHNOLOGY prefix string
// (throwing IOException on mismatch), then reads the user name, a vint group
// count, and that many group-name strings. Mirrors write() below.
public void readFields ( DataInput in ) throws IOException { String ugiType = Text . readString ( in ) ; if ( ! UGI_TECHNOLOGY . equals ( ugiType ) ) { throw new IOException ( "Expect UGI prefix: " + UGI_TECHNOLOGY + ", but receive a prefix: " + ugiType ) ; } userName = Text . readString ( in ) ; int numOfGroups = WritableUtils . readVInt ( in ) ; groupNames = new String [ numOfGroups ] ; for ( int i = 0 ; i < numOfGroups ; i ++ ) { groupNames [ i ] = Text . readString ( in ) ; } }
Deserialize this object First check if this is a UGI in the string format . If no throw an IOException ; otherwise set this object s fields by reading them from the given data input
32,823
// Serializes this UGI: the UGI_TECHNOLOGY marker string, the user name, a
// vint group count, then each group name. Field order must match readFields.
public void write ( DataOutput out ) throws IOException { Text . writeString ( out , UGI_TECHNOLOGY ) ; Text . writeString ( out , userName ) ; WritableUtils . writeVInt ( out , groupNames . length ) ; for ( String groupName : groupNames ) { Text . writeString ( out , groupName ) ; } }
Serialize this object First write a string marking that this is a UGI in the string format then write this object s serialized form to the given data output
32,824
// Logs in the current Unix user: returns the cached UGI from user2UGIMap if
// present, otherwise queries Unix for the user name and groups (falling back
// to DEFAULT_USERNAME / DEFAULT_GROUP on failure), caches the new UGI, and
// returns it. Any other failure is wrapped in a LoginException.
// NOTE(review): the get-then-put on user2UGIMap is not atomic — concurrent
// first logins for the same user may each build a UGI; confirm the map type
// makes this benign.
public static UnixUserGroupInformation login ( ) throws LoginException { try { String userName ; try { userName = getUnixUserName ( ) ; } catch ( Exception e ) { userName = DEFAULT_USERNAME ; } UnixUserGroupInformation ugi = user2UGIMap . get ( userName ) ; if ( ugi != null ) { return ugi ; } String [ ] groupNames ; try { groupNames = getUnixGroups ( ) ; } catch ( Exception e ) { groupNames = new String [ 1 ] ; groupNames [ 0 ] = DEFAULT_GROUP ; } ugi = new UnixUserGroupInformation ( userName , groupNames ) ; user2UGIMap . put ( ugi . getUserName ( ) , ugi ) ; return ugi ; } catch ( Exception e ) { throw new LoginException ( "Login failed: " + e . getMessage ( ) ) ; } }
Get current user's name and the names of all its groups from Unix . It's assumed that there is only one UGI per user . If this user already has a UGI in the ugi map return the ugi in the map . Otherwise get the current user's information from Unix store it in the map and return it .
32,825
// Resolves a UGI from the configuration (UGI_PROPERTY_NAME); when absent,
// falls back to the Unix login() above and, if save is true, stores the
// resulting UGI back into the configuration.
public static UnixUserGroupInformation login ( Configuration conf , boolean save ) throws LoginException { UnixUserGroupInformation ugi = readFromConf ( conf , UGI_PROPERTY_NAME ) ; if ( ugi == null ) { ugi = login ( ) ; LOG . debug ( "Unix Login: " + ugi ) ; if ( save ) { saveToConf ( conf , UGI_PROPERTY_NAME , ugi ) ; } } return ugi ; }
Get a user s name & its group names from the given configuration ; If it is not defined in the configuration get the current user s information from Unix . If the user has a UGI in the ugi map return the one in the UGI map .
32,826
// Appends a new FieldTypeInfo with the given name and type to this record's
// struct type.
public void addField ( String fieldName , TypeID tid ) { sTid . getFieldTypeInfos ( ) . add ( new FieldTypeInfo ( fieldName , tid ) ) ; }
Add a field .
32,827
/**
 * Returns the type info of a nested record (one level of nesting only), or
 * null when no struct with the given name exists.
 */
public RecordTypeInfo getNestedStructTypeInfo(String name) {
  StructTypeID nested = sTid.findStruct(name);
  return (nested == null) ? null : new RecordTypeInfo(name, nested);
}
Return the type info of a nested record . We only consider nesting to one level .
32,828
// Serializes this record's type information: record start marker, the record
// name, the struct body via writeRest, then the record end marker.
public void serialize ( RecordOutput rout , String tag ) throws IOException { rout . startRecord ( this , tag ) ; rout . writeString ( name , tag ) ; sTid . writeRest ( rout , tag ) ; rout . endRecord ( this , tag ) ; }
Serialize the type information for a record
32,829
// Deserializes this record's type information, mirroring serialize(): record
// start, name, struct body, record end.
public void deserialize ( RecordInput rin , String tag ) throws IOException { rin . startRecord ( tag ) ; this . name = rin . readString ( tag ) ; sTid . read ( rin , tag ) ; rin . endRecord ( tag ) ; }
Deserialize the type information for a record
32,830
// Puts a string into the buffer: a 4-byte big-endian length (via xdr_int)
// followed by the string bytes, padded to a multiple of 4 by pad().
// NOTE(review): s.getBytes() uses the platform default charset — presumably
// these strings are ASCII-only; confirm before relying on non-ASCII input.
// Assumes the buffer has enough remaining capacity — TODO confirm callers
// size it appropriately.
private void xdr_string ( String s ) { byte [ ] bytes = s . getBytes ( ) ; int len = bytes . length ; xdr_int ( len ) ; System . arraycopy ( bytes , 0 , buffer , offset , len ) ; offset += len ; pad ( ) ; }
Puts a string into the buffer by first writing the size of the string as an int followed by the bytes of the string padded if necessary to a multiple of 4 .
32,831
// Puts an integer into the buffer as 4 bytes, big-endian (most significant
// byte first), advancing offset by 4.
private void xdr_int ( int i ) { buffer [ offset ++ ] = ( byte ) ( ( i >> 24 ) & 0xff ) ; buffer [ offset ++ ] = ( byte ) ( ( i >> 16 ) & 0xff ) ; buffer [ offset ++ ] = ( byte ) ( ( i >> 8 ) & 0xff ) ; buffer [ offset ++ ] = ( byte ) ( i & 0xff ) ; }
Puts an integer into the buffer as 4 bytes big - endian .
32,832
// Builds, for each volume, a list of numBucket empty maps and then lets every
// BlockBucket fill in its block -> DatanodeBlockInfo (CRC) entries via
// getBlockCrcPerVolume. Returns the per-volume, per-bucket map.
Map < FSVolume , List < Map < Block , DatanodeBlockInfo > > > getBlockCrcPerVolume ( List < FSVolume > volumes ) { Map < FSVolume , List < Map < Block , DatanodeBlockInfo > > > retMap = new HashMap < FSVolume , List < Map < Block , DatanodeBlockInfo > > > ( ) ; for ( FSVolume volume : volumes ) { List < Map < Block , DatanodeBlockInfo > > newSubMap = new ArrayList < Map < Block , DatanodeBlockInfo > > ( numBucket ) ; for ( int i = 0 ; i < numBucket ; i ++ ) { newSubMap . add ( new HashMap < Block , DatanodeBlockInfo > ( ) ) ; } retMap . put ( volume , newSubMap ) ; } for ( BlockBucket bb : blockBuckets ) { bb . getBlockCrcPerVolume ( retMap ) ; } return retMap ; }
get a list of block info with CRC information per FS volume .
32,833
/**
 * Returns the key for the current join when the collector has one, otherwise
 * the key at the top of the RecordReader heap, or null when both are empty.
 */
public K key() {
  if (jc.hasNext()) {
    return jc.key();
  }
  // fall back to the heap top; ternary instead of a separate if/return
  return q.isEmpty() ? null : q.peek().key();
}
Return the key for the current join or the value at the top of the RecordReader heap .
32,834
// Passes the skip key to child RecordReaders: drains from the heap every
// reader whose current key is <= key, asks each to skip past it, and re-adds
// only those that still have records.
public void skip ( K key ) throws IOException { ArrayList < ComposableRecordReader < K , ? > > tmp = new ArrayList < ComposableRecordReader < K , ? > > ( ) ; while ( ! q . isEmpty ( ) && cmp . compare ( q . peek ( ) . key ( ) , key ) <= 0 ) { tmp . add ( q . poll ( ) ) ; } for ( ComposableRecordReader < K , ? > rr : tmp ) { rr . skip ( key ) ; if ( rr . hasNext ( ) ) { q . add ( rr ) ; } } }
Pass skip key to child RRs .
32,835
// If the provided key matches this composite's current key, fills the join
// collector and registers this reader's delegate iterator under id;
// otherwise registers the EMPTY iterator so the join position still advances.
@ SuppressWarnings ( "unchecked" ) public void accept ( CompositeRecordReader . JoinCollector jc , K key ) throws IOException { if ( hasNext ( ) && 0 == cmp . compare ( key , key ( ) ) ) { fillJoinCollector ( createKey ( ) ) ; jc . add ( id , getDelegate ( ) ) ; return ; } jc . add ( id , EMPTY ) ; }
If key provided matches that of this Composite give JoinCollector iterator over values it may emit .
32,836
// For every child reader currently offering iterkey, obtains an iterator at
// that position in the JoinCollector: reads the heap top's key into iterkey,
// then repeatedly polls matching readers, lets each accept() into jc, and
// re-adds readers that still have records. Returns early when the heap
// empties (avoids peeking an empty queue in the loop condition).
protected void fillJoinCollector ( K iterkey ) throws IOException { if ( ! q . isEmpty ( ) ) { q . peek ( ) . key ( iterkey ) ; while ( 0 == cmp . compare ( q . peek ( ) . key ( ) , iterkey ) ) { ComposableRecordReader < K , ? > t = q . poll ( ) ; t . accept ( jc , iterkey ) ; if ( t . hasNext ( ) ) { q . add ( t ) ; } else if ( q . isEmpty ( ) ) { return ; } } } }
For all child RRs offering the key provided obtain an iterator at that position in the JoinCollector .
32,837
// Creates a new key common to all child readers. On first use, determines the
// key class from the first child and verifies every child agrees (throwing
// ClassCastException otherwise), caching it in keyclass; subsequent calls
// reuse the cached class via ReflectionUtils.newInstance.
@ SuppressWarnings ( "unchecked" ) public K createKey ( ) { if ( null == keyclass ) { final Class < ? > cls = kids [ 0 ] . createKey ( ) . getClass ( ) ; for ( RecordReader < K , ? extends Writable > rr : kids ) { if ( ! cls . equals ( rr . createKey ( ) . getClass ( ) ) ) { throw new ClassCastException ( "Child key classes fail to agree" ) ; } } keyclass = cls . asSubclass ( WritableComparable . class ) ; } return ( K ) ReflectionUtils . newInstance ( keyclass , getConf ( ) ) ; }
Create a new key value common to all child RRs .
32,838
/**
 * Creates the tuple used internally for joins: one freshly created value per
 * child reader, in child order.
 */
protected TupleWritable createInternalValue() {
  final int n = kids.length;
  Writable[] values = new Writable[n];
  for (int idx = 0; idx < n; idx++) {
    values[idx] = kids[idx].createValue();
  }
  return new TupleWritable(values);
}
Create a value to be used internally for joins .
32,839
/**
 * Closes all child RecordReaders and then the join collector; either may be
 * null, in which case it is skipped.
 */
public void close() throws IOException {
  if (kids != null) {
    for (RecordReader<K, ? extends Writable> reader : kids) {
      reader.close();
    }
  }
  if (jc != null) {
    jc.close();
  }
}
Close all child RRs .
32,840
/**
 * Reports progress as the minimum of all child RecordReader progress values
 * (1.0 when there are no children).
 */
public float getProgress() throws IOException {
  float minProgress = 1.0f;
  for (RecordReader<K, ? extends Writable> reader : kids) {
    float p = reader.getProgress();
    if (p < minProgress) {
      minProgress = p;
    }
  }
  return minProgress;
}
Report progress as the minimum of all child RR progress .
32,841
// Returns a JDBC connection to the configured database: loads the driver
// class from DRIVER_CLASS_PROPERTY, then connects with or without
// username/password depending on whether USERNAME_PROPERTY is set.
Connection getConnection ( ) throws ClassNotFoundException , SQLException { Class . forName ( job . get ( DBConfiguration . DRIVER_CLASS_PROPERTY ) ) ; if ( job . get ( DBConfiguration . USERNAME_PROPERTY ) == null ) { return DriverManager . getConnection ( job . get ( DBConfiguration . URL_PROPERTY ) ) ; } else { return DriverManager . getConnection ( job . get ( DBConfiguration . URL_PROPERTY ) , job . get ( DBConfiguration . USERNAME_PROPERTY ) , job . get ( DBConfiguration . PASSWORD_PROPERTY ) ) ; } }
Returns a connection object to the DB
32,842
/**
 * Tries to obtain a FastWritable instance for the given class name. Returns
 * null when the name has the wrong length or is not in the registry;
 * otherwise asks the registered prototype for a fresh instance.
 */
public static FastWritable tryGetInstance(String name, Configuration conf) {
  if (name.length() != NAME_LEN) {
    return null;
  }
  FastWritable prototype = register.get(name);
  if (prototype == null) {
    return null;
  }
  return prototype.getFastWritableInstance(conf);
}
Tries to get an instance given the name of class . If the name is registered as FastWritable the instance is obtained using the registry .
32,843
// Handles the wizard Finish action: when creating a new location, registers
// it with the ServerRegistry on the SWT display thread and returns it; when
// editing, copies the edited values into the original and updates the
// registry under the original name. On any failure, shows an error message
// and returns null to refuse finishing.
public HadoopServer performFinish ( ) { try { if ( this . original == null ) { Display . getDefault ( ) . syncExec ( new Runnable ( ) { public void run ( ) { ServerRegistry . getInstance ( ) . addServer ( HadoopLocationWizard . this . location ) ; } } ) ; return this . location ; } else { final String originalName = this . original . getLocationName ( ) ; this . original . load ( this . location ) ; Display . getDefault ( ) . syncExec ( new Runnable ( ) { public void run ( ) { ServerRegistry . getInstance ( ) . updateServer ( originalName , HadoopLocationWizard . this . location ) ; } } ) ; return this . original ; } } catch ( Exception e ) { e . printStackTrace ( ) ; setMessage ( "Invalid server location values" , IMessageProvider . ERROR ) ; return null ; } }
Performs any actions appropriate in response to the user having pressed the Finish button or refuse if finishing now is not permitted .
32,844
// Creates an editor row for the given configuration property name. When the
// name maps to a known ConfProp, delegates to createConfLabelText; otherwise
// builds a label (defaulting to the property name) plus a single-line
// bordered Text pre-filled from the location's current value, tagged with
// "hPropName" for later lookup, and wired to the modify listener.
private Text createConfNameEditor ( ModifyListener listener , Composite parent , String propName , String labelText ) { { ConfProp prop = ConfProp . getByName ( propName ) ; if ( prop != null ) return createConfLabelText ( listener , parent , prop , labelText ) ; } Label label = new Label ( parent , SWT . NONE ) ; if ( labelText == null ) labelText = propName ; label . setText ( labelText ) ; Text text = new Text ( parent , SWT . SINGLE | SWT . BORDER ) ; GridData data = new GridData ( GridData . FILL_HORIZONTAL ) ; text . setLayoutData ( data ) ; text . setData ( "hPropName" , propName ) ; text . setText ( location . getConfProp ( propName ) ) ; text . addModifyListener ( listener ) ; return text ; }
Create an editor entry for the given configuration name
32,845
/**
 * Transforms the input XML with the given XSL stylesheet, writing the result
 * to {@code out}.
 *
 * @param styleSheet the XSL stylesheet
 * @param xml        the XML document to transform
 * @param out        destination for the transformed output
 * @throws TransformerConfigurationException if the stylesheet is invalid
 * @throws TransformerException              if the transformation fails
 */
public static void transform(InputStream styleSheet, InputStream xml, Writer out)
    throws TransformerConfigurationException, TransformerException {
  TransformerFactory factory = TransformerFactory.newInstance();
  StreamSource style = new StreamSource(styleSheet);
  Transformer transformer = factory.newTransformer(style);
  StreamSource input = new StreamSource(xml);
  StreamResult result = new StreamResult(out);
  transformer.transform(input, result);
}
Transform input xml given a stylesheet .
32,846
// Logically splits the job's input files so that each split covers N lines:
// for every (non-directory) input file, reads line by line counting bytes,
// emitting a FileSplit each time N lines accumulate, plus a final partial
// split for any trailing lines. The LineReader is always closed in finally.
// Note: the numSplits hint is intentionally ignored — split count is driven
// entirely by N.
public InputSplit [ ] getSplits ( JobConf job , int numSplits ) throws IOException { ArrayList < FileSplit > splits = new ArrayList < FileSplit > ( ) ; for ( FileStatus status : listLocatedStatus ( job ) ) { Path fileName = status . getPath ( ) ; if ( status . isDir ( ) ) { throw new IOException ( "Not a file: " + fileName ) ; } FileSystem fs = fileName . getFileSystem ( job ) ; LineReader lr = null ; try { FSDataInputStream in = fs . open ( fileName ) ; lr = new LineReader ( in , job ) ; Text line = new Text ( ) ; int numLines = 0 ; long begin = 0 ; long length = 0 ; int num = - 1 ; while ( ( num = lr . readLine ( line ) ) > 0 ) { numLines ++ ; length += num ; if ( numLines == N ) { splits . add ( new FileSplit ( fileName , begin , length , new String [ ] { } ) ) ; begin += length ; length = 0 ; numLines = 0 ; } } if ( numLines != 0 ) { splits . add ( new FileSplit ( fileName , begin , length , new String [ ] { } ) ) ; } } finally { if ( lr != null ) { lr . close ( ) ; } } } return splits . toArray ( new FileSplit [ splits . size ( ) ] ) ; }
Logically splits the set of input files for the job splits N lines of the input as one split .
32,847
// Generates FileSplits for the job's input files. Splitable non-empty files
// are cut into splitSize chunks (bounded by min/max split size and block
// size); the SPLIT_SLOP guard avoids a tiny final split by folding the
// remainder into the last one when it is small enough. Non-splitable files
// become a single split with the first block's hosts; empty files get a
// zero-length split with no hosts.
public List < InputSplit > getSplits ( JobContext job ) throws IOException { long minSize = Math . max ( getFormatMinSplitSize ( ) , getMinSplitSize ( job ) ) ; long maxSize = getMaxSplitSize ( job ) ; List < InputSplit > splits = new ArrayList < InputSplit > ( ) ; for ( LocatedFileStatus file : listLocatedStatus ( job ) ) { Path path = file . getPath ( ) ; long length = file . getLen ( ) ; BlockLocation [ ] blkLocations = file . getBlockLocations ( ) ; if ( ( length != 0 ) && isSplitable ( job , path ) ) { long blockSize = file . getBlockSize ( ) ; long splitSize = computeSplitSize ( blockSize , minSize , maxSize ) ; long bytesRemaining = length ; while ( ( ( double ) bytesRemaining ) / splitSize > SPLIT_SLOP ) { int blkIndex = getBlockIndex ( blkLocations , length - bytesRemaining ) ; splits . add ( new FileSplit ( path , length - bytesRemaining , splitSize , blkLocations [ blkIndex ] . getHosts ( ) ) ) ; bytesRemaining -= splitSize ; } if ( bytesRemaining != 0 ) { splits . add ( new FileSplit ( path , length - bytesRemaining , bytesRemaining , blkLocations [ blkLocations . length - 1 ] . getHosts ( ) ) ) ; } } else if ( length != 0 ) { splits . add ( new FileSplit ( path , 0 , length , blkLocations [ 0 ] . getHosts ( ) ) ) ; } else { splits . add ( new FileSplit ( path , 0 , length , new String [ 0 ] ) ) ; } } LOG . debug ( "Total # of splits: " + splits . size ( ) ) ; return splits ; }
Generate the list of files and make them into FileSplits .
32,848
/**
 * Instantiates the TrashPolicy named by the "fs.trash.classname"
 * configuration parameter (defaulting to TrashPolicyDefault), initializes it
 * with the given filesystem and home directory, and returns it.
 */
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
    throws IOException {
  Class<? extends TrashPolicy> trashClass = conf.getClass(
      "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
  TrashPolicy policy = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
  policy.initialize(conf, fs, home);
  return policy;
}
Get an instance of the configured TrashPolicy based on the value of the configuration paramater fs . trash . classname .
32,849
// Resets the compressor for a fresh input stream: clears finish flags, empties
// both direct buffers (compressed buffer limit set to 0 so it reads as
// drained), and zeroes user-buffer offsets and byte counters.
public synchronized void reset ( ) { finish = false ; finished = false ; uncompressedDirectBuf . clear ( ) ; uncompressedDirectBufLen = 0 ; compressedDirectBuf . clear ( ) ; compressedDirectBuf . limit ( 0 ) ; userBufOff = userBufLen = 0 ; bytesRead = bytesWritten = 0L ; }
Resets compressor so that a new set of input data can be processed .
32,850
// Logs the job-submitted event: (re)creates the history file (deleting any
// stale one), registers the writer with the file manager, emits the history
// VERSION meta record and the Job submitted record, then writes the job
// configuration XML next to it unless it already exists. An IOException while
// creating the history file disables history for all subsequent events.
// NOTE(review): that IOException is swallowed without logging, and the
// writer/out streams opened before the failure are not closed on that path —
// confirm this is acceptable best-effort behavior.
public void logSubmitted ( String jobConfPath , long submitTime , String jobTrackerId ) throws IOException { if ( disableHistory ) { return ; } int defaultBufferSize = logDirFs . getConf ( ) . getInt ( "io.file.buffer.size" , 4096 ) ; try { FSDataOutputStream out = null ; PrintWriter writer = null ; if ( logDirFs . exists ( logFile ) ) { LOG . info ( "Remove the old history file " + logFile ) ; logDirFs . delete ( logFile , true ) ; } out = logDirFs . create ( logFile , new FsPermission ( HISTORY_FILE_PERMISSION ) , true , defaultBufferSize , logDirFs . getDefaultReplication ( ) , jobHistoryBlockSize , null ) ; writer = new PrintWriter ( out ) ; fileManager . addWriter ( jobId , writer ) ; fileManager . setHistoryFile ( jobId , logFile ) ; writers = fileManager . getWriters ( jobId ) ; if ( null != writers ) { log ( writers , RecordTypes . Meta , new Keys [ ] { Keys . VERSION } , new String [ ] { String . valueOf ( JobHistory . VERSION ) } ) ; } String jobName = getJobName ( ) ; String user = getUserName ( ) ; log ( writers , RecordTypes . Job , new Keys [ ] { Keys . JOBID , Keys . JOBNAME , Keys . USER , Keys . SUBMIT_TIME , Keys . JOBCONF , Keys . JOBTRACKERID } , new String [ ] { jobId . toString ( ) , jobName , user , String . valueOf ( submitTime ) , jobConfPath , jobTrackerId } ) ; } catch ( IOException e ) { disableHistory = true ; } Path jobFilePath = new Path ( logDir , CoronaJobHistoryFilesManager . getConfFilename ( jobId ) ) ; fileManager . setConfFile ( jobId , jobFilePath ) ; FSDataOutputStream jobFileOut = null ; try { if ( ! logDirFs . exists ( jobFilePath ) ) { jobFileOut = logDirFs . create ( jobFilePath , new FsPermission ( HISTORY_FILE_PERMISSION ) , true , defaultBufferSize , logDirFs . getDefaultReplication ( ) , logDirFs . getDefaultBlockSize ( ) , null ) ; conf . writeXml ( jobFileOut ) ; jobFileOut . close ( ) ; } } catch ( IOException ioe ) { LOG . 
error ( "Failed to store job conf in the log dir" , ioe ) ; } finally { if ( jobFileOut != null ) { try { jobFileOut . close ( ) ; } catch ( IOException ie ) { LOG . info ( "Failed to close the job configuration file " + StringUtils . stringifyException ( ie ) ) ; } } } }
Log job submitted event to history . Creates a new file in history for the job . if history file creation fails it disables history for all other events .
32,851
// Logs the job launch: writes a Job record with launch time, total map and
// reduce counts, and PREP status. No-op when history is disabled or there are
// no writers.
public void logInited ( long startTime , int totalMaps , int totalReduces ) { if ( disableHistory ) { return ; } if ( null != writers ) { log ( writers , RecordTypes . Job , new Keys [ ] { Keys . JOBID , Keys . LAUNCH_TIME , Keys . TOTAL_MAPS , Keys . TOTAL_REDUCES , Keys . JOB_STATUS } , new String [ ] { jobId . toString ( ) , String . valueOf ( startTime ) , String . valueOf ( totalMaps ) , String . valueOf ( totalReduces ) , Values . PREP . name ( ) } ) ; } }
Logs launch time of job .
32,852
// Logs the job as RUNNING. No-op when history is disabled or there are no
// writers.
public void logStarted ( ) { if ( disableHistory ) { return ; } if ( null != writers ) { log ( writers , RecordTypes . Job , new Keys [ ] { Keys . JOBID , Keys . JOB_STATUS } , new String [ ] { jobId . toString ( ) , Values . RUNNING . name ( ) } ) ; } }
Logs job as running
32,853
// Logs successful job completion with finish time, task tallies (finished /
// failed / killed maps and reduces) and the escaped compact counter strings,
// then closes and clears the history writers. No-op when history is disabled.
public void logFinished ( long finishTime , int finishedMaps , int finishedReduces , int failedMaps , int failedReduces , int killedMaps , int killedReduces , Counters mapCounters , Counters reduceCounters , Counters counters ) { if ( disableHistory ) { return ; } if ( null != writers ) { log ( writers , RecordTypes . Job , new Keys [ ] { Keys . JOBID , Keys . FINISH_TIME , Keys . JOB_STATUS , Keys . FINISHED_MAPS , Keys . FINISHED_REDUCES , Keys . FAILED_MAPS , Keys . FAILED_REDUCES , Keys . KILLED_MAPS , Keys . KILLED_REDUCES , Keys . MAP_COUNTERS , Keys . REDUCE_COUNTERS , Keys . COUNTERS } , new String [ ] { jobId . toString ( ) , Long . toString ( finishTime ) , Values . SUCCESS . name ( ) , String . valueOf ( finishedMaps ) , String . valueOf ( finishedReduces ) , String . valueOf ( failedMaps ) , String . valueOf ( failedReduces ) , String . valueOf ( killedMaps ) , String . valueOf ( killedReduces ) , mapCounters . makeEscapedCompactString ( ) , reduceCounters . makeEscapedCompactString ( ) , counters . makeEscapedCompactString ( ) } , true ) ; closeAndClear ( writers ) ; } }
Log job finished . closes the job file in history .
32,854
/**
 * Logs a job FAILED record with the finish timestamp, finished task counts
 * and counters, then closes the history writers.
 * No-op when history is disabled or no writers are attached.
 */
public void logFailed(long timestamp, int finishedMaps, int finishedReduces,
    Counters counters) {
  if (disableHistory || writers == null) {
    return;
  }
  Keys[] keys = new Keys[] {
      Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,
      Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES, Keys.COUNTERS};
  String[] values = new String[] {
      jobId.toString(), String.valueOf(timestamp), Values.FAILED.name(),
      String.valueOf(finishedMaps), String.valueOf(finishedReduces),
      counters.makeEscapedCompactString()};
  log(writers, RecordTypes.Job, keys, values, true);
  closeAndClear(writers);
}
Logs job failed event . Closes the job history log file .
32,855
/**
 * Logs the job's priority.
 * NOTE(review): the {@code jobid} parameter is unused — the record is keyed
 * on the instance field {@code jobId} instead. Confirm callers always pass
 * this job's own id, or drop the parameter.
 */
public void logJobPriority ( JobID jobid , JobPriority priority ) { if ( disableHistory ) { return ; } if ( null != writers ) { log ( writers , RecordTypes . Job , new Keys [ ] { Keys . JOBID , Keys . JOB_PRIORITY } , new String [ ] { jobId . toString ( ) , priority . toString ( ) } ) ; } }
Log job s priority .
32,856
/**
 * Moves this job's history into the completed ("done") folder.
 * No-op when history is disabled.
 *
 * @throws IOException if the file manager fails to move the files
 */
public void markCompleted() throws IOException {
  if (!disableHistory) {
    fileManager.moveToDone(jobId, true, CoronaJobTracker.getMainJobID(jobId));
  }
}
Move the completed job into the completed folder .
32,857
/**
 * Logs the successful completion of a task with its finish time and counters.
 * No-op when history is disabled or no writers are attached.
 *
 * @throws RuntimeException if the task does not belong to this job
 */
public void logTaskFinished(TaskID taskId, String taskType, long finishTime,
    Counters counters) {
  if (disableHistory) {
    return;
  }
  // Validate before checking writers: a mismatched job id must always throw.
  JobID taskJobId = taskId.getJobID();
  if (!this.jobId.equals(taskJobId)) {
    throw new RuntimeException("JobId from task: " + taskJobId
        + " does not match expected: " + jobId);
  }
  if (writers == null) {
    return;
  }
  log(writers, RecordTypes.Task,
      new Keys[] {Keys.TASKID, Keys.TASK_TYPE, Keys.TASK_STATUS,
                  Keys.FINISH_TIME, Keys.COUNTERS},
      new String[] {taskId.toString(), taskType, Values.SUCCESS.name(),
                    String.valueOf(finishTime),
                    counters.makeEscapedCompactString()});
}
Log finish time of task .
32,858
/**
 * Logs an update of a task's finish time.
 * No-op when history is disabled or no writers are attached.
 *
 * @throws RuntimeException if the task does not belong to this job
 */
public void logTaskUpdates(TaskID taskId, long finishTime) {
  if (disableHistory) {
    return;
  }
  JobID taskJobId = taskId.getJobID();
  if (!this.jobId.equals(taskJobId)) {
    throw new RuntimeException("JobId from task: " + taskJobId
        + " does not match expected: " + jobId);
  }
  if (writers == null) {
    return;
  }
  log(writers, RecordTypes.Task,
      new Keys[] {Keys.TASKID, Keys.FINISH_TIME},
      new String[] {taskId.toString(), String.valueOf(finishTime)});
}
Update the finish time of task .
32,859
/**
 * Logs a task failure without attributing it to a specific attempt;
 * delegates to the five-argument overload with a null attempt id.
 */
public void logTaskFailed ( TaskID taskId , String taskType , long time , String error ) { logTaskFailed ( taskId , taskType , time , error , null ) ; }
Logs a task failed event without attributing it to a specific attempt .
32,860
/**
 * Logs a task failure, optionally attributing it to the attempt that caused
 * it. No-op when history is disabled or no writers are attached.
 *
 * @param failedDueToAttempt attempt blamed for the failure; may be null
 * @throws RuntimeException if the task does not belong to this job
 */
public void logTaskFailed(TaskID taskId, String taskType, long time,
    String error, TaskAttemptID failedDueToAttempt) {
  if (disableHistory) {
    return;
  }
  JobID taskJobId = taskId.getJobID();
  if (!this.jobId.equals(taskJobId)) {
    throw new RuntimeException("JobId from task: " + taskJobId
        + " does not match expected: " + jobId);
  }
  if (writers == null) {
    return;
  }
  String failedAttempt =
      (failedDueToAttempt == null) ? "" : failedDueToAttempt.toString();
  log(writers, RecordTypes.Task,
      new Keys[] {Keys.TASKID, Keys.TASK_TYPE, Keys.TASK_STATUS,
                  Keys.FINISH_TIME, Keys.ERROR, Keys.TASK_ATTEMPT_ID},
      new String[] {taskId.toString(), taskType, Values.FAILED.name(),
                    String.valueOf(time), error, failedAttempt});
}
Log the task failure
32,861
/**
 * Logs the start of a map task attempt: start time, tracker name and HTTP
 * port. A port of -1 is recorded as an empty string.
 * No-op when history is disabled or no writers are attached.
 *
 * @throws RuntimeException if the attempt does not belong to this job
 */
public void logMapTaskStarted(TaskAttemptID taskAttemptId, long startTime,
    String trackerName, int httpPort, String taskType) {
  if (disableHistory) {
    return;
  }
  JobID attemptJobId = taskAttemptId.getJobID();
  if (!this.jobId.equals(attemptJobId)) {
    throw new RuntimeException("JobId from task: " + attemptJobId
        + " does not match expected: " + jobId);
  }
  if (writers == null) {
    return;
  }
  String portString = (httpPort == -1) ? "" : String.valueOf(httpPort);
  log(writers, RecordTypes.MapAttempt,
      new Keys[] {Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,
                  Keys.START_TIME, Keys.TRACKER_NAME, Keys.HTTP_PORT},
      new String[] {taskType, taskAttemptId.getTaskID().toString(),
                    taskAttemptId.toString(), String.valueOf(startTime),
                    trackerName, portString});
}
Log start time of this map task attempt .
32,862
/**
 * Logs the successful finish of a map task attempt with host, state string
 * and counters. No-op when history is disabled or no writers are attached.
 *
 * @throws RuntimeException if the attempt does not belong to this job
 */
public void logMapTaskFinished(TaskAttemptID taskAttemptId, long finishTime,
    String hostName, String taskType, String stateString, Counters counter) {
  if (disableHistory) {
    return;
  }
  JobID attemptJobId = taskAttemptId.getJobID();
  if (!this.jobId.equals(attemptJobId)) {
    throw new RuntimeException("JobId from task: " + attemptJobId
        + " does not match expected: " + jobId);
  }
  if (writers == null) {
    return;
  }
  log(writers, RecordTypes.MapAttempt,
      new Keys[] {Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,
                  Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME,
                  Keys.STATE_STRING, Keys.COUNTERS},
      new String[] {taskType, taskAttemptId.getTaskID().toString(),
                    taskAttemptId.toString(), Values.SUCCESS.name(),
                    String.valueOf(finishTime), hostName, stateString,
                    counter.makeEscapedCompactString()});
}
Log finish time of map task attempt .
32,863
/**
 * Logs a killed reduce task attempt with the kill timestamp, host and error
 * text. No-op when history is disabled or no writers are attached.
 *
 * @throws RuntimeException if the attempt does not belong to this job
 */
public void logReduceTaskKilled(TaskAttemptID taskAttemptId, long timestamp,
    String hostName, String error, String taskType) {
  if (disableHistory) {
    return;
  }
  JobID attemptJobId = taskAttemptId.getJobID();
  if (!this.jobId.equals(attemptJobId)) {
    throw new RuntimeException("JobId from task: " + attemptJobId
        + " does not match expected: " + jobId);
  }
  if (writers == null) {
    return;
  }
  log(writers, RecordTypes.ReduceAttempt,
      new Keys[] {Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,
                  Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.HOSTNAME,
                  Keys.ERROR},
      new String[] {taskType, taskAttemptId.getTaskID().toString(),
                    taskAttemptId.toString(), Values.KILLED.name(),
                    String.valueOf(timestamp), hostName, error});
}
Log killed reduce task attempt .
32,864
/**
 * Destroys a single process: sends SIGTERM first, then escalates to SIGKILL
 * via sigKill after {@code sleeptimeBeforeSigkill} — optionally in the
 * background per {@code inBackground} (semantics of sigKill — confirm).
 */
protected static void destroyProcess ( String pid , long sleeptimeBeforeSigkill , boolean inBackground ) { terminateProcess ( pid ) ; sigKill ( pid , false , sleeptimeBeforeSigkill , inBackground ) ; }
Destroy the process .
32,865
/**
 * Destroys a whole process group: sends SIGTERM to the group first, then
 * escalates to SIGKILL via sigKill (the {@code true} flag selects
 * group-wide delivery) after {@code sleeptimeBeforeSigkill}.
 */
protected static void destroyProcessGroup ( String pgrpId , long sleeptimeBeforeSigkill , boolean inBackground ) { terminateProcessGroup ( pgrpId ) ; sigKill ( pgrpId , true , sleeptimeBeforeSigkill , inBackground ) ; }
Destroy the process group .
32,866
/**
 * Sends SIGTERM ({@code kill <pid>}) to the process so it can exit
 * gracefully. An IOException from the shell is captured and appended to the
 * info log rather than propagated — this is best-effort by design.
 */
public static void terminateProcess ( String pid ) { ShellCommandExecutor shexec = null ; String errMsg = null ; try { String [ ] args = { "kill" , pid } ; shexec = new ShellCommandExecutor ( args ) ; shexec . execute ( ) ; } catch ( IOException ioe ) { errMsg = ioe . getMessage ( ) ; } finally { LOG . info ( "Killing process " + pid + " with SIGTERM. Exit code " + shexec . getExitCode ( ) + ( errMsg == null ? "" : " (" + errMsg + ")" ) ) ; } }
Sends terminate signal to the process allowing it to gracefully exit .
32,867
/**
 * Sends SIGTERM to every process in the given process group
 * ({@code kill -- -<pgrp>}: the negative argument targets the whole group).
 * Shell failures are logged as warnings; the call is best-effort.
 */
public static void terminateProcessGroup ( String pgrpId ) { ShellCommandExecutor shexec = null ; try { String [ ] args = { "kill" , "--" , "-" + pgrpId } ; shexec = new ShellCommandExecutor ( args ) ; shexec . execute ( ) ; } catch ( IOException ioe ) { LOG . warn ( "Error executing shell command " + ioe ) ; } finally { LOG . info ( "Killing all processes in the process group " + pgrpId + " with SIGTERM. Exit code " + shexec . getExitCode ( ) ) ; } }
Sends a terminate signal to all the processes belonging to the passed process group , allowing the group to gracefully exit .
32,868
/**
 * Forcefully terminates a process with SIGKILL ({@code kill -9}).
 * Does nothing when the process is already dead; shell errors are logged
 * as warnings rather than propagated.
 */
public static void killProcess(String pid) {
  if (!ProcessTree.isAlive(pid)) {
    return;
  }
  ShellCommandExecutor shexec =
      new ShellCommandExecutor(new String[] {"kill", "-9", pid});
  try {
    shexec.execute();
  } catch (IOException e) {
    LOG.warn("Error sending SIGKILL to process " + pid + " ."
        + StringUtils.stringifyException(e));
  } finally {
    LOG.info("Killing process " + pid + " with SIGKILL. Exit code "
        + shexec.getExitCode());
  }
}
Sends kill signal to process forcefully terminating the process .
32,869
/**
 * Forcefully terminates every process in the given process group with
 * SIGKILL ({@code kill -9 -<pgrp>}; the negative argument targets the whole
 * group). Does nothing when the group is already gone; shell errors are
 * logged as warnings rather than propagated.
 */
public static void killProcessGroup(String pgrpId) {
  if (!ProcessTree.isProcessGroupAlive(pgrpId)) {
    return;
  }
  String[] args = {"kill", "-9", "-" + pgrpId};
  ShellCommandExecutor shexec = new ShellCommandExecutor(args);
  try {
    shexec.execute();
  } catch (IOException e) {
    LOG.warn("Error sending SIGKILL to process group " + pgrpId + " ."
        + StringUtils.stringifyException(e));
  } finally {
    // Fixed missing space after "group" in the log message.
    LOG.info("Killing process group " + pgrpId + " with SIGKILL. Exit code "
        + shexec.getExitCode());
  }
}
Sends a kill signal to all processes belonging to the same process group , forcefully terminating the process group .
32,870
/**
 * Checks whether the process with the given pid is still alive by sending
 * it the null signal ({@code kill -0}): exit code 0 means the process
 * exists. Assumes the pid was recently alive, so pid wrap-around is not a
 * concern. Returns false on a non-zero exit code or any shell error.
 */
public static boolean isAlive(String pid) {
  ShellCommandExecutor shexec = null;
  try {
    shexec = new ShellCommandExecutor(new String[] {"kill", "-0", pid});
    shexec.execute();
  } catch (ExitCodeException ee) {
    // kill -0 exited non-zero: no such process (or no permission).
    return false;
  } catch (IOException ioe) {
    LOG.warn("Error executing shell command "
        + Arrays.toString(shexec.getExecString()) + ioe);
    return false;
  }
  return shexec.getExitCode() == 0;
}
Is the process with PID pid still alive? This method assumes that isAlive is called on a pid that was alive not too long ago and hence assumes no chance of pid - wrapping - around .
32,871
/**
 * Decodes a raw {@code har://} URI to recover the URI of the underlying
 * filesystem. The har host encodes the underlying scheme and host as
 * "scheme-host" (e.g. {@code har://hdfs-namenode/...}); a missing authority
 * falls back to the configured default filesystem.
 *
 * @param rawURI the har URI to decode
 * @param conf   configuration used to resolve the default filesystem
 * @return the underlying filesystem URI (never null)
 * @throws IOException if the URI is malformed or has a query component
 */
private URI decodeHarURI(URI rawURI, Configuration conf) throws IOException {
  String tmpAuth = rawURI.getAuthority();
  if (tmpAuth == null) {
    // No authority at all: use the configured default filesystem.
    return FileSystem.getDefaultUri(conf);
  }
  String host = rawURI.getHost();
  String[] str = host.split("-", 2);
  // The old null check could never fire (split never yields null elements);
  // an empty scheme, however, is genuinely invalid — reject it here instead
  // of failing later.
  if (str[0] == null || str[0].isEmpty()) {
    throw new IOException("URI: " + rawURI + " is an invalid Har URI.");
  }
  String underLyingScheme = str[0];
  String underLyingHost = (str.length > 1) ? str[1] : null;
  int underLyingPort = rawURI.getPort();
  String auth = (underLyingHost == null && underLyingPort == -1)
      ? null : (underLyingHost + ":" + underLyingPort);
  if (rawURI.getQuery() != null) {
    throw new IOException("query component in Path not supported " + rawURI);
  }
  try {
    return new URI(underLyingScheme, auth, rawURI.getPath(),
        rawURI.getQuery(), rawURI.getFragment());
  } catch (URISyntaxException e) {
    // Previously swallowed, which made this method return null and blow up
    // later with an NPE; surface the problem at the source instead.
    throw new IOException("URI: " + rawURI + " is an invalid Har URI.", e);
  }
}
decode the raw URI to get the underlying URI
32,872
/**
 * Returns the path of {@code path} relative to the inside of the har
 * filesystem: walks up from the leaf until {@code archivePath} is reached,
 * rebuilding the relative path, and returns it rooted at "/". The archive
 * root itself maps to "/"; a path that reaches the real root without
 * passing through {@code archivePath} yields null (not inside the archive).
 */
public Path getPathInHar ( Path path ) { Path harPath = new Path ( path . toUri ( ) . getPath ( ) ) ; if ( archivePath . compareTo ( harPath ) == 0 ) return new Path ( Path . SEPARATOR ) ; Path tmp = new Path ( harPath . getName ( ) ) ; Path parent = harPath . getParent ( ) ; while ( ! ( parent . compareTo ( archivePath ) == 0 ) ) { if ( parent . toString ( ) . equals ( Path . SEPARATOR ) ) { tmp = null ; break ; } tmp = new Path ( parent . getName ( ) , tmp ) ; parent = parent . getParent ( ) ; } if ( tmp != null ) tmp = new Path ( Path . SEPARATOR , tmp ) ; return tmp ; }
this method returns the path inside the har filesystem . this is relative path inside the har filesystem .
32,873
/**
 * Rebuilds {@code p} on top of the base path {@code initial}, qualified with
 * this filesystem's scheme and authority. The root path maps directly to
 * the base; otherwise the components of {@code p} are reassembled (leaf
 * first, walking up depth-1 parents) and appended to the qualified base.
 */
private Path makeRelative ( String initial , Path p ) { String scheme = this . uri . getScheme ( ) ; String authority = this . uri . getAuthority ( ) ; Path root = new Path ( Path . SEPARATOR ) ; if ( root . compareTo ( p ) == 0 ) return new Path ( scheme , authority , initial ) ; Path retPath = new Path ( p . getName ( ) ) ; Path parent = p . getParent ( ) ; for ( int i = 0 ; i < p . depth ( ) - 1 ; i ++ ) { retPath = new Path ( parent . getName ( ) , retPath ) ; parent = parent . getParent ( ) ; } return new Path ( new Path ( scheme , authority , initial ) , retPath . toString ( ) ) ; }
just use the path api to do it .
32,874
/**
 * Rewrites block locations obtained from the underlying part file into the
 * coordinate space of the archived file: {@code fileOffsetInHar} is
 * subtracted from each block offset, and blocks overlapping the requested
 * [start, start+len) window are clipped to it at both ends.
 * NOTE: mutates and returns the {@code locations} array in place.
 */
static BlockLocation [ ] fixBlockLocations ( BlockLocation [ ] locations , long start , long len , long fileOffsetInHar ) { long end = start + len ; for ( BlockLocation location : locations ) { long harBlockStart = location . getOffset ( ) - fileOffsetInHar ; long harBlockEnd = harBlockStart + location . getLength ( ) ; if ( start > harBlockStart ) { location . setOffset ( start ) ; location . setLength ( location . getLength ( ) - ( start - harBlockStart ) ) ; } else { location . setOffset ( harBlockStart ) ; } if ( harBlockEnd > end ) { location . setLength ( location . getLength ( ) - ( harBlockEnd - end ) ) ; } } return locations ; }
Fix offset and length of block locations . Note that this method modifies the original array .
32,875
/**
 * Returns block locations for a file inside the archive: resolves the
 * backing part file from the har status, queries the underlying filesystem
 * at the file's offset within that part, then fixes the returned offsets
 * and lengths back into the archived file's coordinate space.
 */
public BlockLocation [ ] getFileBlockLocations ( FileStatus file , long start , long len ) throws IOException { HarStatus hstatus = getFileHarStatus ( file . getPath ( ) , null ) ; Path partPath = new Path ( archivePath , hstatus . getPartName ( ) ) ; FileStatus partStatus = fs . getFileStatus ( partPath ) ; BlockLocation [ ] locations = fs . getFileBlockLocations ( partStatus , hstatus . getStartIndex ( ) + start , len ) ; return fixBlockLocations ( locations , start , len , hstatus . getStartIndex ( ) ) ; }
Get block locations from the underlying fs and fix their offsets and lengths .
32,876
/**
 * Collects FileStatus entries for the direct children of {@code parent} by
 * brute-force scanning the whole archive index line by line: any entry whose
 * path starts with the parent's path and is exactly one level deeper is
 * turned into a FileStatus (replication/block size come from the index
 * file's own status) and appended to {@code statuses}.
 * NOTE(review): the {@code children} parameter is never read here — confirm
 * whether it can be dropped from the signature.
 */
private void fileStatusesInIndex ( HarStatus parent , List < FileStatus > statuses , List < String > children , FileStatus archiveIndexStat ) throws IOException { FSDataInputStream aIn = null ; try { aIn = fs . open ( archiveIndex ) ; LineReader aLin ; long read = 0 ; aLin = new LineReader ( aIn , getConf ( ) ) ; String parentString = parent . getName ( ) ; if ( ! parentString . endsWith ( Path . SEPARATOR ) ) { parentString += Path . SEPARATOR ; } Path harPath = new Path ( parentString ) ; int harlen = harPath . depth ( ) ; Text line = new Text ( ) ; while ( read < archiveIndexStat . getLen ( ) ) { int tmp = aLin . readLine ( line ) ; read += tmp ; String lineFeed = line . toString ( ) ; String child = decodeFileName ( lineFeed . substring ( 0 , lineFeed . indexOf ( " " ) ) ) ; if ( ( child . startsWith ( parentString ) ) ) { Path thisPath = new Path ( child ) ; if ( thisPath . depth ( ) == harlen + 1 ) { HarStatus hstatus = new HarStatus ( lineFeed ) ; FileStatus childStatus = new FileStatus ( hstatus . isDir ( ) ? 0 : hstatus . getLength ( ) , hstatus . isDir ( ) , ( int ) archiveIndexStat . getReplication ( ) , archiveIndexStat . getBlockSize ( ) , hstatus . getModificationTime ( ) , hstatus . getAccessTime ( ) , new FsPermission ( hstatus . getPermission ( ) ) , hstatus . getOwner ( ) , hstatus . getGroup ( ) , makeRelative ( this . uri . getPath ( ) , new Path ( hstatus . getName ( ) ) ) ) ; statuses . add ( childStatus ) ; } line . clear ( ) ; } } } finally { if ( aIn != null ) { aIn . close ( ) ; } } }
Get filestatuses of all the children of a given directory . This just reads through the index file line by line to get all statuses for children of a directory . It's a brute-force way of getting all such filestatuses .
32,877
/**
 * Looks up the index line for {@code harPath} using the two-level index:
 * first scans the master index for hash-range entries (begin/end byte
 * offsets) whose range covers the path's har hash, then seeks into the
 * archive index at each candidate range and compares decoded file names
 * until a match is found. Returns the raw matching index line, or null
 * when the path is not in the archive. Stream close failures are ignored
 * deliberately (best-effort cleanup).
 */
private String fileStatusInIndex ( Path harPath ) throws IOException { int hashCode = getHarHash ( harPath . toString ( ) ) ; FSDataInputStream in = fs . open ( masterIndex ) ; FileStatus masterStat = fs . getFileStatus ( masterIndex ) ; LineReader lin = new LineReader ( in , getConf ( ) ) ; Text line = new Text ( ) ; long read = lin . readLine ( line ) ; String [ ] readStr = null ; List < Store > stores = new ArrayList < Store > ( ) ; while ( read < masterStat . getLen ( ) ) { int b = lin . readLine ( line ) ; read += b ; readStr = line . toString ( ) . split ( " " ) ; int startHash = Integer . parseInt ( readStr [ 0 ] ) ; int endHash = Integer . parseInt ( readStr [ 1 ] ) ; if ( startHash <= hashCode && hashCode <= endHash ) { stores . add ( new Store ( Long . parseLong ( readStr [ 2 ] ) , Long . parseLong ( readStr [ 3 ] ) , startHash , endHash ) ) ; } line . clear ( ) ; } try { lin . close ( ) ; } catch ( IOException io ) { } FSDataInputStream aIn = fs . open ( archiveIndex ) ; LineReader aLin ; String retStr = null ; for ( Store s : stores ) { read = 0 ; aIn . seek ( s . begin ) ; aLin = new LineReader ( aIn , getConf ( ) ) ; while ( read + s . begin < s . end ) { int tmp = aLin . readLine ( line ) ; read += tmp ; String lineFeed = line . toString ( ) ; String [ ] parsed = lineFeed . split ( " " ) ; parsed [ 0 ] = decodeFileName ( parsed [ 0 ] ) ; if ( harPath . compareTo ( new Path ( parsed [ 0 ] ) ) == 0 ) { retStr = lineFeed ; break ; } line . clear ( ) ; } if ( retStr != null ) break ; } try { aIn . close ( ) ; } catch ( IOException io ) { } return retStr ; }
Looks up the index line for the given filename in the index file .
32,878
/**
 * Returns the FileStatus of a file inside the archive. Length, directory
 * flag, times, permission, owner and group come from the har index entry;
 * replication and block size are borrowed from the archive index file's
 * own status (original permissions are not persisted by the archiver).
 */
public FileStatus getFileStatus ( Path f ) throws IOException { FileStatus archiveStatus = fs . getFileStatus ( archiveIndex ) ; HarStatus hstatus = getFileHarStatus ( f , archiveStatus ) ; return new FileStatus ( hstatus . isDir ( ) ? 0 : hstatus . getLength ( ) , hstatus . isDir ( ) , ( int ) archiveStatus . getReplication ( ) , archiveStatus . getBlockSize ( ) , hstatus . getModificationTime ( ) , hstatus . getAccessTime ( ) , new FsPermission ( hstatus . getPermission ( ) ) , hstatus . getOwner ( ) , hstatus . getGroup ( ) , makeRelative ( this . uri . getPath ( ) , new Path ( hstatus . getName ( ) ) ) ) ; }
Return the filestatus of files in the har archive . The permissions returned are those of the archive index files ; the original permissions are not persisted while creating a hadoop archive .
32,879
/**
 * Opens a file inside the archive. Resolves the har index entry to find the
 * backing part file, start offset and length, and returns a
 * HarFSDataInputStream that fakes end-of-file at the file's boundary.
 *
 * @throws FileNotFoundException if {@code f} resolves to a directory
 */
public FSDataInputStream open ( Path f , int bufferSize ) throws IOException { HarStatus hstatus = getFileHarStatus ( f , null ) ; if ( hstatus . isDir ( ) ) { throw new FileNotFoundException ( f + " : not a file in " + archivePath ) ; } return new HarFSDataInputStream ( fs , new Path ( archivePath , hstatus . getPartName ( ) ) , hstatus . getStartIndex ( ) , hstatus . getLength ( ) , bufferSize ) ; }
Returns a har input stream which fakes end of file . It reads the index files to get the part file name and the size and start of the file .
32,880
/**
 * Lists the status of {@code f} inside the archive: resolves the path's
 * index entry, then either returns the single file's status or, for a
 * directory, scans the index for its direct children.
 *
 * @throws FileNotFoundException if the path has no index entry
 */
public FileStatus [ ] listStatus ( Path f ) throws IOException { List < FileStatus > statuses = new ArrayList < FileStatus > ( ) ; FileStatus archiveStatus = fs . getFileStatus ( archiveIndex ) ; Path tmpPath = makeQualified ( f ) ; Path harPath = getPathInHar ( tmpPath ) ; String readStr = fileStatusInIndex ( harPath ) ; if ( readStr == null ) { throw new FileNotFoundException ( "File " + f + " not found in " + archivePath ) ; } HarStatus hstatus = new HarStatus ( readStr , archiveStatus , version ) ; if ( ! hstatus . isDir ( ) ) { statuses . add ( new FileStatus ( hstatus . getLength ( ) , hstatus . isDir ( ) , archiveStatus . getReplication ( ) , archiveStatus . getBlockSize ( ) , hstatus . getModificationTime ( ) , hstatus . getAccessTime ( ) , new FsPermission ( hstatus . getPermission ( ) ) , hstatus . getOwner ( ) , hstatus . getGroup ( ) , makeRelative ( this . uri . getPath ( ) , new Path ( hstatus . getName ( ) ) ) ) ) ; } else { fileStatusesInIndex ( hstatus , statuses , hstatus . getChildren ( ) , archiveStatus ) ; } return statuses . toArray ( new FileStatus [ statuses . size ( ) ] ) ; }
liststatus returns the children of a directory after looking up the index files .
32,881
/**
 * Reads the metadata header (everything before the checksum data) from the
 * stream: consumes the leading short version field, then delegates to the
 * version-aware overload with the supplied checksum implementation.
 */
public static BlockMetadataHeader readHeader ( DataInputStream in , Checksum checksumImpl ) throws IOException { return readHeader ( in . readShort ( ) , in , checksumImpl ) ; }
This reads all the fields till the beginning of checksum .
32,882
/**
 * Reads the metadata header from the top of the given metadata file.
 * The stream is always closed (close failures are swallowed by
 * IOUtils.closeStream, matching the surrounding code's convention).
 */
static BlockMetadataHeader readHeader ( File file ) throws IOException { DataInputStream in = null ; try { in = new DataInputStream ( new BufferedInputStream ( new FileInputStream ( file ) ) ) ; return readHeader ( in ) ; } finally { IOUtils . closeStream ( in ) ; } }
Reads header at the top of metadata file and returns the header .
32,883
/**
 * Builds a header when the version short has already been consumed from the
 * stream: reads the serialized checksum descriptor next and pairs it with
 * the given version.
 */
private static BlockMetadataHeader readHeader ( short version , DataInputStream in ) throws IOException { DataChecksum checksum = DataChecksum . newDataChecksum ( in ) ; return new BlockMetadataHeader ( version , checksum ) ; }
Version is already read .
32,884
/**
 * Writes the metadata header (everything before the checksum data):
 * version short first, then the checksum's own serialized header.
 */
private static void writeHeader ( DataOutputStream out , BlockMetadataHeader header ) throws IOException { out . writeShort ( header . getVersion ( ) ) ; header . getChecksum ( ) . writeHeader ( out ) ; }
This writes all the fields till the beginning of checksum .
32,885
/**
 * Convenience overload: writes a header for the given checksum using the
 * current METADATA_VERSION.
 */
static void writeHeader ( DataOutputStream out , DataChecksum checksum ) throws IOException { writeHeader ( out , new BlockMetadataHeader ( METADATA_VERSION , checksum ) ) ; }
Writes all the fields till the beginning of checksum .
32,886
/**
 * Reads checksum-verified bytes into {@code b[off..off+len)}. The single
 * OR-ed bounds expression is the standard overflow-safe check: it is
 * negative iff any of off, len, off+len or the tail capacity is invalid.
 * Loops over read1 until the buffer is full or the stream is exhausted;
 * returns the number of bytes read, or read1's EOF value when nothing was
 * read at all.
 */
public synchronized int read ( byte [ ] b , int off , int len ) throws IOException { if ( ( off | len | ( off + len ) | ( b . length - ( off + len ) ) ) < 0 ) { throw new IndexOutOfBoundsException ( ) ; } int n = 0 ; for ( ; ; ) { int nread = read1 ( b , off + n , len - n ) ; if ( nread <= 0 ) return ( n == 0 ) ? nread : n ; n += nread ; if ( n >= len ) return n ; } }
Read checksum verified bytes from this byte - input stream into the specified byte array starting at the given offset .
32,887
/**
 * Packs {@code length} bytes of {@code checksum}, starting at
 * {@code offset}, into a long in big-endian byte order (first byte ends up
 * most significant). Returns 0 for a zero length.
 */
static public long checksum2long(byte[] checksum, int offset, int length) {
  long crc = 0L;
  for (int i = 0; i < length; i++) {
    int shift = (length - 1 - i) * 8;
    crc |= (0xffL & checksum[offset + i]) << shift;
  }
  return crc;
}
Convert a checksum byte array to a long
32,888
/**
 * (Re)initializes the checksum state: stores the verify flag and checksum
 * implementation, allocates fresh chunk and checksum buffers of the given
 * sizes, and resets the fill count and read position to zero.
 */
final protected synchronized void set ( boolean verifyChecksum , Checksum sum , int maxChunkSize , int checksumSize ) { this . verifyChecksum = verifyChecksum ; this . sum = sum ; this . buf = new byte [ maxChunkSize ] ; this . checksum = new byte [ checksumSize ] ; this . count = 0 ; this . pos = 0 ; }
Set the checksum related parameters
32,889
/**
 * Prints metrics data as indented multi-line text: one line per context,
 * then per record (indent 1), then per tag set as "{k=v,...}:" (indent 2),
 * then one "metric=value" line per metric (indent 3). The exact output
 * format is relied on by consumers — do not change spacing or separators.
 */
void printMap ( PrintWriter out , Map < String , Map < String , List < TagsMetricsPair > > > map ) { for ( Map . Entry < String , Map < String , List < TagsMetricsPair > > > context : map . entrySet ( ) ) { out . println ( context . getKey ( ) ) ; for ( Map . Entry < String , List < TagsMetricsPair > > record : context . getValue ( ) . entrySet ( ) ) { indent ( out , 1 ) ; out . println ( record . getKey ( ) ) ; for ( TagsMetricsPair pair : record . getValue ( ) ) { indent ( out , 2 ) ; out . print ( "{" ) ; boolean first = true ; for ( Map . Entry < String , Object > tagValue : pair . tagMap . entrySet ( ) ) { if ( first ) { first = false ; } else { out . print ( "," ) ; } out . print ( tagValue . getKey ( ) ) ; out . print ( "=" ) ; out . print ( tagValue . getValue ( ) . toString ( ) ) ; } out . println ( "}:" ) ; for ( Map . Entry < String , Number > metricValue : pair . metricMap . entrySet ( ) ) { indent ( out , 3 ) ; out . print ( metricValue . getKey ( ) ) ; out . print ( "=" ) ; out . println ( metricValue . getValue ( ) . toString ( ) ) ; } } } } }
Prints metrics data in a multi - line text form .
32,890
/**
 * Returns the human-readable name for a numeric job run-state.
 * Valid states index {@code runStates} starting at 1; anything outside
 * that range yields UNKNOWN.
 */
public static String getJobRunState(int state) {
  boolean known = state >= 1 && state < runStates.length;
  return known ? runStates[state] : UNKNOWN;
}
Helper method to get human - readable state of the job .
32,891
/**
 * Returns true when the job has reached a terminal state:
 * SUCCEEDED, FAILED or KILLED.
 */
public synchronized boolean isJobComplete() {
  final int state = runState;
  return state == JobStatus.SUCCEEDED
      || state == JobStatus.FAILED
      || state == JobStatus.KILLED;
}
Returns true if the status is for a completed job .
32,892
/**
 * Converts a new-API job run-state enum to the old numeric run-state by
 * returning its backing value.
 */
static int getOldNewJobRunState ( org . apache . hadoop . mapreduce . JobStatus . State state ) { return state . getValue ( ) ; }
A utility to convert new job runstates to the old ones .
32,893
/**
 * Sets the hosts hosting this block. A null argument is stored as an empty
 * array, so the field is never null.
 */
public void setHosts(String[] hosts) throws IOException {
  this.hosts = (hosts == null) ? new String[0] : hosts;
}
Set the hosts hosting this block
32,894
/**
 * Sets the network topology paths of the hosts. A null argument is stored
 * as an empty array, so the field is never null.
 */
public void setTopologyPaths(String[] topologyPaths) throws IOException {
  this.topologyPaths = (topologyPaths == null) ? new String[0] : topologyPaths;
}
Set the network topology paths of the hosts
32,895
/**
 * Instantiates and returns the exception wrapped by this remote exception,
 * by reflectively loading the recorded class name and constructing it as an
 * IOException subtype. Any failure (class not found, not an IOException,
 * no usable constructor) is deliberately swallowed and this RemoteException
 * itself is returned as the fallback.
 */
public IOException unwrapRemoteException ( ) { try { Class < ? > realClass = Class . forName ( getClassName ( ) ) ; return instantiateException ( realClass . asSubclass ( IOException . class ) ) ; } catch ( Exception e ) { } return this ; }
Instantiate and return the exception wrapped up by this remote exception .
32,896
/**
 * Serializes this remote exception as an XML element with path, class and
 * message attributes. Only the first line of the localized message is used,
 * and any "ClassName:" prefix up to the first colon is stripped.
 */
public void writeXml ( String path , XMLOutputter doc ) throws IOException { doc . startTag ( RemoteException . class . getSimpleName ( ) ) ; doc . attribute ( "path" , path ) ; doc . attribute ( "class" , getClassName ( ) ) ; String msg = getLocalizedMessage ( ) ; int i = msg . indexOf ( "\n" ) ; if ( i >= 0 ) { msg = msg . substring ( 0 , i ) ; } doc . attribute ( "message" , msg . substring ( msg . indexOf ( ":" ) + 1 ) . trim ( ) ) ; doc . endTag ( ) ; }
Write the object to XML format
32,897
/**
 * Adds a destination path (with its replication properties) to this policy.
 * The path must be absolute; the backing list is created lazily on first
 * use.
 *
 * @throws IOException if {@code in} is not an absolute path
 */
public void addDestPath ( String in , Properties repl ) throws IOException { Path dPath = new Path ( in ) ; if ( ! dPath . isAbsolute ( ) || ! dPath . toUri ( ) . isAbsolute ( ) ) { throw new IOException ( "Path " + in + " is not absolute." ) ; } PathInfo pinfo = new PathInfo ( dPath , repl ) ; if ( this . destPath == null ) { this . destPath = new ArrayList < PathInfo > ( ) ; } this . destPath . add ( pinfo ) ; }
Sets the destination path on which this policy has to be applied
32,898
/**
 * Interns a non-directory inode's local name through the name cache and,
 * when the cache returns a canonical byte array, points the inode at it so
 * duplicate names share storage. Directories are skipped.
 */
private void cacheNameInternal ( INode inode ) { if ( inode . isDirectory ( ) ) { return ; } ByteArray name = new ByteArray ( inode . getLocalNameBytes ( ) ) ; name = nameCache . put ( name ) ; if ( name != null ) { inode . setLocalName ( name . getBytes ( ) ) ; } }
Adds cached entry to the map and updates INode
32,899
/**
 * Marks the image as loaded and switches name caching to synchronous mode:
 * waits for all async caching tasks (wrapping interruption/execution
 * failures as IOException), shuts down the executor, drains any inodes
 * still queued by caching them synchronously, then releases the queue and
 * sets the loaded flag. Order matters: tasks must finish before the queue
 * is drained.
 */
void imageLoaded ( ) throws IOException { for ( Future < Void > task : cachingTasks ) { try { task . get ( ) ; } catch ( InterruptedException e ) { throw new IOException ( "FSDirectory cache received interruption" ) ; } catch ( ExecutionException e ) { throw new IOException ( e ) ; } } this . cachingTasks = null ; this . cachingExecutor . shutdownNow ( ) ; this . cachingExecutor = null ; for ( INode inode : cachingTempQueue ) { cacheNameInternal ( inode ) ; } this . cachingTempQueue = null ; this . imageLoaded = true ; }
Inform that from now on all caching is done synchronously . Cache remaining inodes from the queue .