idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
31,900
protected int randomIntInWindow ( int begin , int windowSize , int n , Set < Integer > excludeSet ) { final int size = Math . min ( windowSize , n ) ; if ( size <= 0 ) { return - 1 ; } int adjustment = 0 ; for ( Integer v : excludeSet ) { int vindex = ( v . intValue ( ) - begin + n ) % n ; if ( vindex < size ) { adjustment ++ ; } } if ( adjustment >= size ) { return - 1 ; } int rindex = r . nextInt ( size - adjustment ) ; int iterator = begin ; for ( int i = 0 ; i <= rindex ; i ++ ) { while ( excludeSet . contains ( iterator ) ) { iterator = ( iterator + 1 ) % n ; } if ( i != rindex ) { iterator = ( iterator + 1 ) % n ; } } return iterator ; }
returns a random integer within a modular window taking into consideration a sorted list of nodes to be excluded .
31,901
public DatanodeDescriptor [ ] chooseTarget ( FSInodeInfo srcInode , int numOfReplicas , DatanodeDescriptor writer , List < DatanodeDescriptor > chosenNodes , List < Node > excludesNodes , long blocksize ) { if ( numOfReplicas == 0 || clusterMap . getNumOfLeaves ( ) == 0 ) { return new DatanodeDescriptor [ 0 ] ; } int [ ] result = getActualReplicas ( numOfReplicas , chosenNodes ) ; numOfReplicas = result [ 0 ] ; int maxNodesPerRack = result [ 1 ] ; HashMap < Node , Node > excludedNodes = new HashMap < Node , Node > ( ) ; List < DatanodeDescriptor > results = new ArrayList < DatanodeDescriptor > ( chosenNodes . size ( ) + numOfReplicas ) ; updateExcludedAndChosen ( null , excludedNodes , results , chosenNodes ) ; if ( ! clusterMap . contains ( writer ) ) { writer = null ; } DatanodeDescriptor localNode = super . chooseTarget ( numOfReplicas , writer , excludedNodes , blocksize , maxNodesPerRack , results , chosenNodes . isEmpty ( ) ) ; return this . finalizeTargets ( results , chosenNodes , writer , localNode ) ; }
This method is currently used only for re - replication and should be used only for this . If this method is used for normal block placements that would completely break this placement policy .
31,902
protected void chooseFirstInRemoteRack ( DatanodeDescriptor localMachine , HashMap < Node , Node > excludedNodes , long blocksize , int maxReplicasPerRack , List < DatanodeDescriptor > results ) throws NotEnoughReplicasException { readLock ( ) ; try { RackRingInfo rackInfo = racksMap . get ( localMachine . getNetworkLocation ( ) ) ; assert ( rackInfo != null ) ; Integer machineId = rackInfo . findNode ( localMachine ) ; assert ( machineId != null ) ; if ( ! chooseRemoteRack ( rackInfo . index , rackInfo . index , rackWindow + 1 , machineId , machineWindow , excludedNodes , blocksize , maxReplicasPerRack , results , false ) ) { LOG . info ( "Couldn't find a Datanode within node group. " + "Resorting to default policy." ) ; super . chooseRemoteRack ( 1 , localMachine , excludedNodes , blocksize , maxReplicasPerRack , results ) ; } } finally { readUnlock ( ) ; } }
Picks up the first replica stored in a remote rack .
31,903
private void findBestWithFirst ( DatanodeDescriptor first , List < DatanodeDescriptor > listOfNodes , DatanodeDescriptor [ ] result ) { for ( int in2 = 0 ; in2 < listOfNodes . size ( ) ; in2 ++ ) { DatanodeDescriptor n2 = listOfNodes . get ( in2 ) ; if ( ! first . equals ( n2 ) ) { if ( result [ 1 ] == null && inWindow ( first , n2 ) ) { result [ 0 ] = first ; result [ 1 ] = n2 ; } for ( int in3 = in2 + 1 ; in3 < listOfNodes . size ( ) ; in3 ++ ) { DatanodeDescriptor n3 = listOfNodes . get ( in3 ) ; if ( ! first . equals ( n3 ) && inWindow ( first , n3 , n2 ) ) { result [ 0 ] = first ; result [ 1 ] = n2 ; result [ 2 ] = n3 ; return ; } } } } }
Function that finds the best partial triple including a first replica
31,904
private boolean inWindow ( DatanodeDescriptor first , DatanodeDescriptor testing ) { readLock ( ) ; try { RackRingInfo rackInfo = racksMap . get ( first . getNetworkLocation ( ) ) ; assert ( rackInfo != null ) ; Integer machineId = rackInfo . findNode ( first ) ; assert ( machineId != null ) ; final int rackWindowStart = rackInfo . index ; final RackRingInfo rackTest = racksMap . get ( testing . getNetworkLocation ( ) ) ; assert ( rackTest != null ) ; final int rackDist = ( rackTest . index - rackWindowStart + racks . size ( ) ) % racks . size ( ) ; if ( rackDist < rackWindow + 1 && rackTest . index != rackInfo . index ) { final Integer idFirst = rackInfo . findNode ( first ) ; assert ( idFirst != null ) ; final int sizeFirstRack = rackInfo . rackNodes . size ( ) ; final int sizeTestRack = rackTest . rackNodes . size ( ) ; final int start = idFirst * sizeTestRack / sizeFirstRack ; final Integer idTest = rackTest . findNode ( testing ) ; assert ( idTest != null ) ; final int dist = ( idTest - start + sizeTestRack ) % sizeTestRack ; if ( dist < machineWindow ) { return true ; } } return false ; } finally { readUnlock ( ) ; } }
Verifies if testing node is within right windows of first node
31,905
private boolean inWindow ( DatanodeDescriptor first , DatanodeDescriptor testing1 , DatanodeDescriptor testing2 ) { readLock ( ) ; try { if ( ! testing1 . getNetworkLocation ( ) . equals ( testing2 . getNetworkLocation ( ) ) ) { return false ; } RackRingInfo rackInfo = racksMap . get ( first . getNetworkLocation ( ) ) ; assert ( rackInfo != null ) ; Integer machineId = rackInfo . findNode ( first ) ; assert ( machineId != null ) ; final int rackWindowStart = rackInfo . index ; final RackRingInfo rackTest = racksMap . get ( testing1 . getNetworkLocation ( ) ) ; assert ( rackTest != null ) ; final int rackDist = ( rackTest . index - rackWindowStart + racks . size ( ) ) % racks . size ( ) ; if ( rackDist < rackWindow + 1 && rackTest . index != rackInfo . index ) { final int rackSize = rackTest . rackNodes . size ( ) ; Integer idN2 = rackTest . findNode ( testing1 ) ; assert ( idN2 != null ) ; Integer idN3 = rackTest . findNode ( testing2 ) ; assert ( idN3 != null ) ; final Integer idFirst = rackInfo . findNode ( first ) ; assert ( idFirst != null ) ; final int sizeFirstRack = rackInfo . rackNodes . size ( ) ; final int end = idFirst * rackSize / sizeFirstRack ; final int prevIdFirst = ( idFirst + sizeFirstRack - 1 ) % sizeFirstRack ; int start = ( prevIdFirst * rackSize / sizeFirstRack ) ; int distPropWindow = ( end - start + rackSize ) % rackSize ; if ( distPropWindow > 0 ) { start = ( start + 1 ) % rackSize ; distPropWindow -- ; } int distIdN2 = ( idN2 - start + rackSize ) % rackSize ; int distIdN3 = ( idN3 - start + rackSize ) % rackSize ; int distN3N2 = ( idN3 - idN2 + rackSize ) % rackSize ; int distN2N3 = ( idN2 - idN3 + rackSize ) % rackSize ; if ( distIdN2 <= distPropWindow && distN3N2 < machineWindow ) return true ; if ( distIdN3 <= distPropWindow && distN2N3 < machineWindow ) return true ; } return false ; } finally { readUnlock ( ) ; } }
Verifies if testing nodes are within right windows of first node
31,906
private void findBestWithoutFirst ( List < DatanodeDescriptor > listOfNodes , DatanodeDescriptor [ ] result ) { readLock ( ) ; try { for ( int in2 = 0 ; in2 < listOfNodes . size ( ) ; in2 ++ ) { DatanodeDescriptor n2 = listOfNodes . get ( in2 ) ; for ( int in3 = in2 + 1 ; in3 < listOfNodes . size ( ) ; in3 ++ ) { DatanodeDescriptor n3 = listOfNodes . get ( in3 ) ; if ( n2 . getNetworkLocation ( ) . equals ( n3 . getNetworkLocation ( ) ) ) { RackRingInfo rackInfo = racksMap . get ( n2 . getNetworkLocation ( ) ) ; assert ( rackInfo != null ) ; final int rackSize = rackInfo . rackNodes . size ( ) ; final Integer idN2 = rackInfo . findNode ( n2 ) ; final Integer idN3 = rackInfo . findNode ( n3 ) ; if ( idN2 != null && idN3 != null ) { int dist = ( idN3 - idN2 + rackSize ) % rackSize ; if ( dist >= machineWindow ) { dist = rackSize - dist ; } if ( dist < machineWindow ) { result [ 0 ] = null ; result [ 1 ] = n2 ; result [ 2 ] = n3 ; return ; } } } } } } finally { readUnlock ( ) ; } }
Finds best match considering only the remote nodes .
31,907
protected boolean chooseRemoteRack ( int rackIdx , int firstRack , int rackWindow , int machineIdx , int windowSize , HashMap < Node , Node > excludedNodes , long blocksize , int maxReplicasPerRack , List < DatanodeDescriptor > results , boolean reverse ) throws NotEnoughReplicasException { readLock ( ) ; try { HashSet < Integer > excludedRacks = new HashSet < Integer > ( ) ; excludedRacks . add ( rackIdx ) ; int n = racks . size ( ) ; int currRackSize = racksMap . get ( racks . get ( rackIdx ) ) . rackNodes . size ( ) ; while ( excludedRacks . size ( ) < rackWindow ) { int newRack = randomIntInWindow ( firstRack , rackWindow , n , excludedRacks ) ; if ( newRack < 0 ) break ; excludedRacks . add ( newRack ) ; int newRackSize = racksMap . get ( racks . get ( newRack ) ) . rackNodes . size ( ) ; int firstMachine = machineIdx * newRackSize / currRackSize ; int newWindowSize = windowSize ; if ( reverse ) { firstMachine = ( ( int ) Math . ceil ( ( double ) machineIdx * newRackSize / currRackSize ) ) % newRackSize ; newWindowSize = Math . max ( 1 , windowSize * newRackSize / currRackSize ) ; } if ( newWindowSize <= 0 ) { continue ; } if ( chooseMachine ( racks . get ( newRack ) , firstMachine , newWindowSize , excludedNodes , blocksize , maxReplicasPerRack , results ) ) { return true ; } } return false ; } finally { readUnlock ( ) ; } }
Picks up a remote machine within defined window
31,908
protected boolean chooseMachine ( String rack , int firstMachine , int windowSize , HashMap < Node , Node > excludedNodes , long blocksize , int maxReplicasPerRack , List < DatanodeDescriptor > results ) { readLock ( ) ; try { HashSet < Integer > excludedMachines = new HashSet < Integer > ( ) ; RackRingInfo rackInfo = racksMap . get ( rack ) ; assert ( rackInfo != null ) ; int n = rackInfo . rackNodesMap . size ( ) ; List < Node > rackDatanodes = clusterMap . getDatanodesInRack ( rack ) ; if ( rackDatanodes == null ) { return false ; } while ( excludedMachines . size ( ) < windowSize ) { int newMachine = randomIntInWindow ( firstMachine , windowSize , n , excludedMachines ) ; if ( newMachine < 0 ) return false ; excludedMachines . add ( newMachine ) ; DatanodeDescriptor chosenNode = null ; for ( Node node : rackDatanodes ) { DatanodeDescriptor datanode = ( DatanodeDescriptor ) node ; Integer idx = rackInfo . findNode ( datanode ) ; if ( idx != null && idx . intValue ( ) == newMachine ) { chosenNode = datanode ; break ; } } if ( chosenNode == null ) continue ; Node oldNode = excludedNodes . put ( chosenNode , chosenNode ) ; if ( oldNode == null ) { if ( isGoodTarget ( chosenNode , blocksize , maxReplicasPerRack , results ) ) { results . add ( chosenNode ) ; return true ; } } } return false ; } finally { readUnlock ( ) ; } }
Chosses a machine within a window inside a rack
31,909
public void incr ( ResourceType resourceType ) { Integer current = typeToCountMap . get ( resourceType ) ; if ( current == null ) { typeToCountMap . put ( resourceType , new Integer ( 1 ) ) ; } else { typeToCountMap . put ( resourceType , current + 1 ) ; } }
Increment the count for a given resource type
31,910
public Integer getCount ( ResourceType resourceType ) { Integer count = typeToCountMap . get ( resourceType ) ; if ( count == null ) { return 0 ; } else { return count ; } }
Return the count of the ResourceType
31,911
public synchronized void skipTo ( final long newValue ) throws IllegalStateException { if ( newValue < currentValue ) { throw new IllegalStateException ( "Cannot skip to less than the current value (=" + currentValue + "), where newValue=" + newValue ) ; } currentValue = newValue ; }
Skip to the new value .
31,912
static void addCodec ( RaidCodec codec ) { List < RaidCodec > newCodecs = new ArrayList < RaidCodec > ( ) ; newCodecs . addAll ( codecs ) ; newCodecs . add ( codec ) ; codecs = Collections . unmodifiableList ( newCodecs ) ; Map < String , RaidCodec > newIdToCodec = new HashMap < String , RaidCodec > ( ) ; newIdToCodec . putAll ( idToCodec ) ; newIdToCodec . put ( codec . id , codec ) ; idToCodec = Collections . unmodifiableMap ( newIdToCodec ) ; }
Used by unit test only
31,913
public BlockInfo [ ] getSourceBlocks ( BlockInfo [ ] blocks ) { int numSourceBlocks = blocks . length - ( blocks . length / numStripeBlocks ) * numParityBlocks - ( ( blocks . length % numStripeBlocks == 0 ) ? 0 : numParityBlocks ) ; BlockInfo [ ] sourceBlocks = new BlockInfo [ numSourceBlocks ] ; int pos = numParityBlocks ; int stripeEnd = numStripeBlocks ; for ( int i = 0 ; i < numSourceBlocks ; i ++ ) { sourceBlocks [ i ] = blocks [ pos ] ; pos ++ ; if ( pos == stripeEnd ) { pos += numParityBlocks ; stripeEnd += numStripeBlocks ; } } return sourceBlocks ; }
Return only the source blocks of the raided file
31,914
public BlockInfo [ ] getParityBlocks ( BlockInfo [ ] blocks ) { int numBlocks = ( blocks . length / numStripeBlocks ) * numParityBlocks + ( ( blocks . length % numStripeBlocks == 0 ) ? 0 : numParityBlocks ) ; BlockInfo [ ] parityBlocks = new BlockInfo [ numBlocks ] ; int pos = 0 ; int parityEnd = numParityBlocks ; for ( int i = 0 ; i < numBlocks ; i ++ ) { parityBlocks [ i ] = blocks [ pos ] ; pos ++ ; if ( pos == parityEnd ) { pos += numDataBlocks ; parityEnd += numStripeBlocks ; } } return parityBlocks ; }
Return only the parity blocks of the raided file
31,915
public boolean checkRaidProgress ( INodeFile sourceINode , LightWeightLinkedSet < RaidBlockInfo > raidEncodingTasks , FSNamesystem fs , boolean forceAdd ) throws IOException { boolean result = true ; BlockInfo [ ] blocks = sourceINode . getBlocks ( ) ; for ( int i = 0 ; i < blocks . length ; i += numStripeBlocks ) { boolean hasParity = true ; if ( ! forceAdd ) { for ( int j = 0 ; j < numParityBlocks ; j ++ ) { if ( fs . countLiveNodes ( blocks [ i + j ] ) < this . parityReplication ) { hasParity = false ; break ; } } } if ( ! hasParity || forceAdd ) { raidEncodingTasks . add ( new RaidBlockInfo ( blocks [ i ] , parityReplication , i ) ) ; result = false ; } } return result ; }
Count the number of live replicas of each parity block in the raided file If any stripe has not enough parity block replicas add the stripe to raidEncodingTasks to schedule encoding . If forceAdd is true we always add the stripe to raidEncodingTasks without checking
31,916
private void handleNullRead ( ) throws IOException { if ( curStreamFinished && readNullAfterStreamFinished ) { curStreamConsumed = true ; } else { try { Thread . sleep ( 100 ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } if ( curStreamFinished ) readNullAfterStreamFinished = true ; refreshStreamPosition ( ) ; }
Called after we read a null operation from the transaction log .
31,917
private void tryReloadingEditLog ( ) throws IOException { LOG . info ( "Trying to reload the edit log ..." ) ; if ( rollImageCount . get ( ) == 1 ) { try { LOG . info ( "Trying to reload the edit log from " + editsFile . getAbsolutePath ( ) ) ; openInputStream ( editsFile ) ; LOG . info ( "Successfully reloaded the edit log from " + editsFile . getAbsolutePath ( ) + ". Trying to refresh position." ) ; refreshStreamPosition ( ) ; LOG . info ( "Successfully refreshed stream position" ) ; return ; } catch ( IOException e ) { LOG . warn ( "Failed to reload from " + editsFile . getAbsolutePath ( ) , e ) ; } } try { LOG . info ( "Trying to reload the edit log from " + editsNewFile . getAbsolutePath ( ) ) ; openInputStream ( editsNewFile ) ; LOG . info ( "Successfully reloaded the edit log from " + editsNewFile . getAbsolutePath ( ) + ". Trying to refresh position." ) ; refreshStreamPosition ( ) ; LOG . info ( "Successfully refreshed stream position" ) ; return ; } catch ( IOException e ) { LOG . error ( "Failed to reload from " + editsFile . getAbsolutePath ( ) , e ) ; throw e ; } }
Tries to fully reload the edit log .
31,918
private void trySwitchingEditLog ( ) throws IOException { if ( shouldSwitchEditLog ( ) ) { curStreamFinished = true ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Should switch edit log. rollImageCount=" + rollImageCount + ". curStreamConsumed=" + curStreamConsumed ) ; } if ( curStreamConsumed ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Reloading edit log ..." ) ; } openEditLog ( ) ; rollImageCount . decrementAndGet ( ) ; } } }
Tries to switch from the previous transaction file to the new one . It ensures that no notifications are missed . If there is a possibility that the current transaction log file has more notifications to be read then it will keep the current stream intact .
31,919
private void openInputStream ( File txFile ) throws IOException { int ioExceptionRetryCount = 0 , logHeaderCorruptRetryCount = 0 ; LOG . info ( "Trying to load the edit log from " + txFile . getAbsolutePath ( ) ) ; do { try { inputStream = new EditLogFileInputStream ( txFile ) ; editLogFilePosition = inputStream . getPosition ( ) ; curStreamConsumed = false ; curStreamFinished = false ; readNullAfterStreamFinished = false ; LOG . info ( "Successfully loaded the edits log from " + txFile . getAbsolutePath ( ) ) ; break ; } catch ( LogHeaderCorruptException e1 ) { if ( logHeaderCorruptRetryCount == LOG_HEADER_CORRUPT_RETRY_MAX ) { LOG . error ( "Failed to load the edit log. No retries left." , e1 ) ; throw new IOException ( "Could not load the edit log" ) ; } logHeaderCorruptRetryCount ++ ; LOG . warn ( "Failed to load the edit log. Retry " + logHeaderCorruptRetryCount + " ..." , e1 ) ; try { Thread . sleep ( ioExceptionRetryCount * LOG_HEADER_CORRUPT_BASE_SLEEP ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } catch ( IOException e2 ) { if ( ioExceptionRetryCount == IO_EXCEPTION_RETRY_MAX ) { LOG . error ( "Failed to load the edit log. No retries left." , e2 ) ; throw new IOException ( "Could not load the edit log" ) ; } ioExceptionRetryCount ++ ; LOG . warn ( "Failed to load the edit log. Retry " + ioExceptionRetryCount + " ..." , e2 ) ; try { Thread . sleep ( ioExceptionRetryCount * IO_EXCEPTION_BASE_SLEEP ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } } while ( true ) ; }
Tries opening the input stream .
31,920
public static Path getLocalCache ( URI cache , Configuration conf , Path subdir , FileStatus fileStatus , boolean isArchive , long confFileStamp , Path currentWorkDir , boolean honorSymLinkConf , MRAsyncDiskService asyncDiskService , LocalDirAllocator lDirAllocator ) throws IOException { return getLocalCache ( cache , conf , subdir , fileStatus , isArchive , confFileStamp , fileStatus . getLen ( ) , currentWorkDir , honorSymLinkConf , asyncDiskService , lDirAllocator ) ; }
Added for back compatibility .
31,921
public static void releaseCache ( URI cache , Configuration conf , long timeStamp ) throws IOException { String cacheId = getKey ( cache , conf , timeStamp ) ; synchronized ( cachedArchives ) { CacheStatus lcacheStatus = cachedArchives . get ( cacheId ) ; if ( lcacheStatus == null ) { LOG . warn ( "Cannot find localized cache: " + cache + " (key: " + cacheId + ") in releaseCache!" ) ; return ; } lcacheStatus . refcount -- ; } }
This is the opposite of getlocalcache . When you are done with using the cache you need to release the cache
31,922
private static void deleteCache ( Configuration conf , MRAsyncDiskService asyncDiskService ) throws IOException { List < CacheStatus > deleteSet = new LinkedList < CacheStatus > ( ) ; synchronized ( cachedArchives ) { for ( Iterator < String > it = cachedArchives . keySet ( ) . iterator ( ) ; it . hasNext ( ) ; ) { String cacheId = ( String ) it . next ( ) ; CacheStatus lcacheStatus = cachedArchives . get ( cacheId ) ; if ( lcacheStatus . refcount == 0 ) { deleteSet . add ( lcacheStatus ) ; it . remove ( ) ; } } } Thread cacheFileCleaner = new Thread ( new CacheFileCleanTask ( asyncDiskService , FileSystem . getLocal ( conf ) , deleteSet ) ) ; cacheFileCleaner . start ( ) ; }
To delete the caches which have a refcount of zero
31,923
private static void deleteLocalPath ( MRAsyncDiskService asyncDiskService , LocalFileSystem fs , Path path ) throws IOException { boolean deleted = false ; if ( asyncDiskService != null ) { String localPathToDelete = path . toUri ( ) . getPath ( ) ; deleted = asyncDiskService . moveAndDeleteAbsolutePath ( localPathToDelete ) ; if ( ! deleted ) { LOG . warn ( "Cannot find DistributedCache path " + localPathToDelete + " on any of the asyncDiskService volumes!" ) ; } } if ( ! deleted ) { fs . delete ( path , true ) ; } LOG . info ( "Deleted path " + path ) ; }
Delete a local path with asyncDiskService if available or otherwise synchronously with local file system .
31,924
private static Path localizeCache ( Configuration conf , URI cache , long confFileStamp , CacheStatus cacheStatus , boolean isArchive ) throws IOException { FileSystem fs = getFileSystem ( cache , conf ) ; FileSystem localFs = FileSystem . getLocal ( conf ) ; Path parchive = null ; if ( isArchive ) { parchive = new Path ( cacheStatus . localizedLoadPath , new Path ( cacheStatus . localizedLoadPath . getName ( ) ) ) ; } else { parchive = cacheStatus . localizedLoadPath ; } if ( ! localFs . mkdirs ( parchive . getParent ( ) ) ) { throw new IOException ( "Mkdirs failed to create directory " + cacheStatus . localizedLoadPath . toString ( ) ) ; } String cacheId = cache . getPath ( ) ; fs . copyToLocalFile ( new Path ( cacheId ) , parchive ) ; if ( isArchive ) { String tmpArchive = parchive . toString ( ) . toLowerCase ( ) ; File srcFile = new File ( parchive . toString ( ) ) ; File destDir = new File ( parchive . getParent ( ) . toString ( ) ) ; if ( tmpArchive . endsWith ( ".jar" ) ) { RunJar . unJar ( srcFile , destDir ) ; } else if ( tmpArchive . endsWith ( ".zip" ) ) { FileUtil . unZip ( srcFile , destDir ) ; } else if ( isTarFile ( tmpArchive ) ) { FileUtil . unTar ( srcFile , destDir ) ; } } long cacheSize = FileUtil . getDU ( new File ( parchive . getParent ( ) . toString ( ) ) ) ; cacheStatus . size = cacheSize ; addCacheInfoUpdate ( cacheStatus ) ; try { Path localDir = new Path ( cacheStatus . localizedBaseDir , cacheStatus . uniqueParentDir ) ; LOG . info ( "Doing chmod on localdir :" + localDir ) ; FileUtil . chmod ( localDir . toString ( ) , "ugo+rx" , true ) ; } catch ( InterruptedException e ) { LOG . warn ( "Exception in chmod" + e . toString ( ) ) ; } cacheStatus . mtime = getTimestamp ( conf , cache ) ; return cacheStatus . localizedLoadPath ; }
and does chmod for the files
31,925
private static boolean ifExistsAndFresh ( Configuration conf , FileSystem fs , URI cache , long confFileStamp , CacheStatus lcacheStatus , FileStatus fileStatus ) throws IOException { long dfsFileStamp ; if ( fileStatus != null ) { dfsFileStamp = fileStatus . getModificationTime ( ) ; } else { dfsFileStamp = getTimestamp ( conf , cache ) ; } if ( dfsFileStamp != confFileStamp ) { LOG . fatal ( "File: " + cache + " has changed on HDFS since job started" ) ; throw new IOException ( "File: " + cache + " has changed on HDFS since job started" ) ; } if ( dfsFileStamp != lcacheStatus . mtime ) { return false ; } return true ; }
Checks if the cache has already been localized and is fresh
31,926
public static long getTimestamp ( Configuration conf , URI cache ) throws IOException { FileSystem fileSystem = FileSystem . get ( cache , conf ) ; Path filePath = new Path ( cache . getPath ( ) ) ; return fileSystem . getFileStatus ( filePath ) . getModificationTime ( ) ; }
Returns mtime of a given cache file on hdfs .
31,927
public static void createAllSymlink ( Configuration conf , File jobCacheDir , File workDir ) throws IOException { if ( ( jobCacheDir == null || ! jobCacheDir . isDirectory ( ) ) || workDir == null || ( ! workDir . isDirectory ( ) ) ) { return ; } boolean createSymlink = getSymlink ( conf ) ; if ( createSymlink ) { File [ ] list = jobCacheDir . listFiles ( ) ; for ( int i = 0 ; i < list . length ; i ++ ) { FileUtil . symLink ( list [ i ] . getAbsolutePath ( ) , new File ( workDir , list [ i ] . getName ( ) ) . toString ( ) ) ; } } }
This method create symlinks for all files in a given dir in another directory
31,928
public static void setCacheArchives ( URI [ ] archives , Configuration conf ) { String sarchives = StringUtils . uriToString ( archives ) ; conf . set ( "mapred.cache.archives" , sarchives ) ; }
Set the configuration with the given set of archives
31,929
public static void setCacheFiles ( URI [ ] files , Configuration conf ) { String sfiles = StringUtils . uriToString ( files ) ; conf . set ( "mapred.cache.files" , sfiles ) ; }
Set the configuration with the given set of files
31,930
public static void addCacheArchive ( URI uri , Configuration conf ) { String archives = conf . get ( "mapred.cache.archives" ) ; conf . set ( "mapred.cache.archives" , archives == null ? uri . toString ( ) : archives + "," + uri . toString ( ) ) ; }
Add a archives to be localized to the conf
31,931
public static void addCacheFile ( URI uri , Configuration conf ) { String files = conf . get ( "mapred.cache.files" ) ; conf . set ( "mapred.cache.files" , files == null ? uri . toString ( ) : files + "," + uri . toString ( ) ) ; }
Add a file to be localized to the conf
31,932
public static void addFileToClassPath ( Path file , Configuration conf ) throws IOException { String classpath = conf . get ( "mapred.job.classpath.files" ) ; conf . set ( "mapred.job.classpath.files" , classpath == null ? file . toString ( ) : classpath + System . getProperty ( "path.separator" ) + file . toString ( ) ) ; URI uri = file . makeQualified ( file . getFileSystem ( conf ) ) . toUri ( ) ; addCacheFile ( uri , conf ) ; }
Add an file path to the current set of classpath entries It adds the file to cache as well .
31,933
public static Path [ ] getArchiveClassPaths ( Configuration conf ) { String classpath = conf . get ( "mapred.job.classpath.archives" ) ; if ( classpath == null ) return null ; ArrayList list = Collections . list ( new StringTokenizer ( classpath , System . getProperty ( "path.separator" ) ) ) ; Path [ ] paths = new Path [ list . size ( ) ] ; for ( int i = 0 ; i < list . size ( ) ; i ++ ) { paths [ i ] = new Path ( ( String ) list . get ( i ) ) ; } return paths ; }
Get the archive entries in classpath as an array of Path
31,934
public static boolean getSymlink ( Configuration conf ) { String result = conf . get ( "mapred.create.symlink" ) ; if ( "yes" . equals ( result ) ) { return true ; } return false ; }
This method checks to see if symlinks are to be create for the localized cache files in the current working directory
31,935
public static boolean checkURIs ( URI [ ] uriFiles , URI [ ] uriArchives ) { if ( ( uriFiles == null ) && ( uriArchives == null ) ) { return true ; } if ( uriFiles != null ) { for ( int i = 0 ; i < uriFiles . length ; i ++ ) { String frag1 = uriFiles [ i ] . getFragment ( ) ; if ( frag1 == null ) return false ; for ( int j = i + 1 ; j < uriFiles . length ; j ++ ) { String frag2 = uriFiles [ j ] . getFragment ( ) ; if ( frag2 == null ) return false ; if ( frag1 . equalsIgnoreCase ( frag2 ) ) return false ; } if ( uriArchives != null ) { for ( int j = 0 ; j < uriArchives . length ; j ++ ) { String frag2 = uriArchives [ j ] . getFragment ( ) ; if ( frag2 == null ) { return false ; } if ( frag1 . equalsIgnoreCase ( frag2 ) ) return false ; for ( int k = j + 1 ; k < uriArchives . length ; k ++ ) { String frag3 = uriArchives [ k ] . getFragment ( ) ; if ( frag3 == null ) return false ; if ( frag2 . equalsIgnoreCase ( frag3 ) ) return false ; } } } } } return true ; }
This method checks if there is a conflict in the fragment names of the uris . Also makes sure that each uri has a fragment . It is only to be called if you want to create symlinks for the various archives and files .
31,936
public static void purgeCache ( Configuration conf , MRAsyncDiskService service ) throws IOException { synchronized ( cachedArchives ) { LocalFileSystem localFs = FileSystem . getLocal ( conf ) ; for ( Map . Entry < String , CacheStatus > f : cachedArchives . entrySet ( ) ) { try { deleteLocalPath ( service , localFs , f . getValue ( ) . localizedLoadPath ) ; } catch ( IOException ie ) { LOG . debug ( "Error cleaning up cache" , ie ) ; } } cachedArchives . clear ( ) ; } }
Clear the entire contents of the cache and delete the backing files . This should only be used when the server is reinitializing because the users are going to lose their files .
31,937
private static void deleteCacheInfoUpdate ( CacheStatus cacheStatus ) { if ( ! cacheStatus . isInited ( ) ) { return ; } synchronized ( baseDirSize ) { Long dirSize = baseDirSize . get ( cacheStatus . getBaseDir ( ) ) ; if ( dirSize != null ) { dirSize -= cacheStatus . size ; baseDirSize . put ( cacheStatus . getBaseDir ( ) , dirSize ) ; } } synchronized ( baseDirNumberSubDir ) { Integer dirSubDir = baseDirNumberSubDir . get ( cacheStatus . getBaseDir ( ) ) ; if ( dirSubDir != null ) { dirSubDir -- ; baseDirNumberSubDir . put ( cacheStatus . getBaseDir ( ) , dirSubDir ) ; } } }
Update the maps baseDirSize and baseDirNumberSubDir when deleting cache .
31,938
private static void addCacheInfoUpdate ( CacheStatus cacheStatus ) { long cacheSize = cacheStatus . size ; synchronized ( baseDirSize ) { Long dirSize = baseDirSize . get ( cacheStatus . getBaseDir ( ) ) ; if ( dirSize == null ) { dirSize = Long . valueOf ( cacheSize ) ; } else { dirSize += cacheSize ; } baseDirSize . put ( cacheStatus . getBaseDir ( ) , dirSize ) ; } synchronized ( baseDirNumberSubDir ) { Integer dirSubDir = baseDirNumberSubDir . get ( cacheStatus . getBaseDir ( ) ) ; if ( dirSubDir == null ) { dirSubDir = 1 ; } else { dirSubDir += 1 ; } baseDirNumberSubDir . put ( cacheStatus . getBaseDir ( ) , dirSubDir ) ; } }
Update the maps baseDirSize and baseDirNumberSubDir when adding cache .
31,939
public static URI getDefaultUri ( Configuration conf ) { return URI . create ( fixName ( conf . get ( FS_DEFAULT_NAME_KEY , "file:///" ) ) ) ; }
Get the default filesystem URI from a configuration .
31,940
public static LocalFileSystem getLocal ( Configuration conf ) throws IOException { return ( LocalFileSystem ) get ( LocalFileSystem . NAME , conf ) ; }
Get the local file syste
31,941
public static LocalFileSystem newInstanceLocal ( Configuration conf ) throws IOException { return ( LocalFileSystem ) newInstance ( LocalFileSystem . NAME , conf ) ; }
Get a unique local file system object
31,942
public static FSDataOutputStream create ( FileSystem fs , Path file , FsPermission permission ) throws IOException { FSDataOutputStream out = fs . create ( file ) ; fs . setPermission ( file , permission ) ; return out ; }
create a file with the provided permission The permission of the file is set to be the provided permission as in setPermission not permission&~umask
31,943
public static boolean mkdirs ( FileSystem fs , Path dir , FsPermission permission ) throws IOException { boolean result = fs . mkdirs ( dir ) ; fs . setPermission ( dir , permission ) ; return result ; }
create a directory with the provided permission The permission of the directory is set to be the provided permission as in setPermission not permission&~umask
31,944
protected void checkPath ( Path path ) { URI uri = path . toUri ( ) ; if ( uri . getScheme ( ) == null ) return ; String thisScheme = this . getUri ( ) . getScheme ( ) ; String thatScheme = uri . getScheme ( ) ; String thisAuthority = this . getUri ( ) . getAuthority ( ) ; String thatAuthority = uri . getAuthority ( ) ; if ( thisScheme . equalsIgnoreCase ( thatScheme ) ) { if ( thisAuthority == thatAuthority || ( thisAuthority != null && thisAuthority . equalsIgnoreCase ( thatAuthority ) ) ) return ; if ( thatAuthority == null && thisAuthority != null ) { URI defaultUri = getDefaultUri ( getConf ( ) ) ; if ( thisScheme . equalsIgnoreCase ( defaultUri . getScheme ( ) ) && thisAuthority . equalsIgnoreCase ( defaultUri . getAuthority ( ) ) ) return ; try { defaultUri = get ( getConf ( ) ) . getUri ( ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } if ( thisScheme . equalsIgnoreCase ( defaultUri . getScheme ( ) ) && thisAuthority . equalsIgnoreCase ( defaultUri . getAuthority ( ) ) ) return ; } } throw new IllegalArgumentException ( "Wrong FS: " + path + ", expected: " + this . getUri ( ) ) ; }
Check that a Path belongs to this FileSystem .
31,945
public BlockLocation [ ] getFileBlockLocations ( FileStatus file , long start , long len ) throws IOException { if ( file == null ) { return null ; } if ( ( start < 0 ) || ( len < 0 ) ) { throw new IllegalArgumentException ( "Invalid start or len parameter" ) ; } if ( file . getLen ( ) < start ) { return new BlockLocation [ 0 ] ; } String [ ] name = { "localhost:50010" } ; String [ ] host = { "localhost" } ; return new BlockLocation [ ] { new BlockLocation ( name , host , 0 , file . getLen ( ) ) } ; }
Return an array containing hostnames offset and size of portions of the given file . For a nonexistent file or regions null will be returned .
31,946
public FSDataOutputStream create ( Path f , Progressable progress ) throws IOException { return create ( f , CreateOptions . progress ( progress ) ) ; }
Create an FSDataOutputStream at the indicated Path with write - progress reporting . Files are overwritten by default .
31,947
public FSDataOutputStream create ( Path f , FsPermission permission , boolean overwrite , int bufferSize , short replication , long blockSize , int bytesPerChecksum , Progressable progress ) throws IOException { return create ( f , CreateOptions . perms ( permission ) , CreateOptions . writeOptions ( overwrite , null ) , CreateOptions . bufferSize ( bufferSize ) , CreateOptions . replicationFactor ( replication ) , CreateOptions . blockSize ( blockSize ) , CreateOptions . bytesPerChecksum ( bytesPerChecksum ) , CreateOptions . progress ( progress ) ) ; }
Opens an FSDataOutputStream at the indicated Path with write - progress reporting .
31,948
public boolean createNewFile ( Path f ) throws IOException { if ( exists ( f ) ) { return false ; } else { create ( f , false , getDefaultBufferSize ( ) ) . close ( ) ; return true ; } }
Creates the given Path as a brand - new zero - length file . If create fails or if it already existed return false .
31,949
public boolean deleteOnExit ( Path f ) throws IOException { if ( ! exists ( f ) ) { return false ; } synchronized ( deleteOnExit ) { deleteOnExit . add ( f ) ; } return true ; }
Mark a path to be deleted when FileSystem is closed . When the JVM shuts down all FileSystem objects will be closed automatically . Then the marked path will be deleted as a result of closing the FileSystem .
31,950
protected void processDeleteOnExit ( ) { synchronized ( deleteOnExit ) { for ( Iterator < Path > iter = deleteOnExit . iterator ( ) ; iter . hasNext ( ) ; ) { Path path = iter . next ( ) ; try { delete ( path , true ) ; } catch ( IOException e ) { LOG . info ( "Ignoring failure to deleteOnExit for path " + path ) ; } iter . remove ( ) ; } } }
Delete all files that were marked as delete - on - exit . This recursively deletes all files in the specified paths .
31,951
public boolean isFile ( Path f ) throws IOException { try { return ! getFileStatus ( f ) . isDir ( ) ; } catch ( FileNotFoundException e ) { return false ; } }
True iff the named path is a regular file .
31,952
public RemoteIterator < LocatedFileStatus > listLocatedStatus ( final Path f , final PathFilter filter ) throws FileNotFoundException , IOException { return new RemoteIterator < LocatedFileStatus > ( ) { private final FileStatus [ ] stats ; private int i = 0 ; { stats = listStatus ( f , filter ) ; if ( stats == null ) { throw new FileNotFoundException ( "File " + f + " does not exist." ) ; } } public boolean hasNext ( ) { return i < stats . length ; } public LocatedFileStatus next ( ) throws IOException { if ( ! hasNext ( ) ) { throw new NoSuchElementException ( "No more entry in " + f ) ; } FileStatus result = stats [ i ++ ] ; BlockLocation [ ] locs = result . isDir ( ) ? null : getFileBlockLocations ( result , 0 , result . getLen ( ) ) ; return new LocatedFileStatus ( result , locs ) ; } } ; }
Listing a directory The returned results include its block location if it is a file The results are filtered by the given path filter
31,953
public RemoteIterator < LocatedBlockFileStatus > listLocatedBlockStatus ( final Path f , final PathFilter filter ) throws FileNotFoundException , IOException { return new RemoteIterator < LocatedBlockFileStatus > ( ) { private final FileStatus [ ] stats ; private int i = 0 ; { stats = listStatus ( f , filter ) ; if ( stats == null ) { throw new FileNotFoundException ( "File " + f + " does not exist." ) ; } } public boolean hasNext ( ) { return i < stats . length ; } public LocatedBlockFileStatus next ( ) throws IOException { if ( ! hasNext ( ) ) { throw new NoSuchElementException ( "No more entry in " + f ) ; } FileStatus result = stats [ i ++ ] ; BlockAndLocation [ ] locs = null ; if ( ! result . isDir ( ) ) { String [ ] name = { "localhost:50010" } ; String [ ] host = { "localhost" } ; locs = new BlockAndLocation [ ] { new BlockAndLocation ( 0L , 0L , name , host , new String [ 0 ] , 0 , result . getLen ( ) , false ) } ; } return new LocatedBlockFileStatus ( result , locs , false ) ; } } ; }
Listing a directory The returned results include its blocks and locations if it is a file The results are filtered by the given path filter
31,954
public FileStatus [ ] globStatus ( Path pathPattern , PathFilter filter ) throws IOException { String filename = pathPattern . toUri ( ) . getPath ( ) ; List < String > filePatterns = GlobExpander . expand ( filename ) ; if ( filePatterns . size ( ) == 1 ) { return globStatusInternal ( pathPattern , filter ) ; } else { List < FileStatus > results = new ArrayList < FileStatus > ( ) ; for ( String filePattern : filePatterns ) { FileStatus [ ] files = globStatusInternal ( new Path ( filePattern ) , filter ) ; for ( FileStatus file : files ) { results . add ( file ) ; } } return results . toArray ( new FileStatus [ results . size ( ) ] ) ; } }
Return an array of FileStatus objects whose path names match pathPattern and is accepted by the user - supplied path filter . Results are sorted by their path names . Return null if pathPattern has no glob and the path does not exist . Return an empty array if pathPattern has a glob and no path matches it .
31,955
public Path getHomeDirectory ( String userName ) { if ( userName == null ) userName = System . getProperty ( "user.name" ) ; return new Path ( "/user/" + userName ) . makeQualified ( this ) ; }
Return the home directory for a given user in this filesystem .
31,956
public void copyFromLocalFile ( Path src , Path dst ) throws IOException { copyFromLocalFile ( false , true , false , src , dst ) ; }
The src file is on the local disk . Add it to FS at the given dst name and the source is kept intact afterwards
31,957
public void moveFromLocalFile ( Path [ ] srcs , Path dst ) throws IOException { copyFromLocalFile ( true , true , false , srcs , dst ) ; }
The src files is on the local disk . Add it to FS at the given dst name removing the source afterwards .
31,958
public void moveFromLocalFile ( Path src , Path dst ) throws IOException { copyFromLocalFile ( true , true , false , src , dst ) ; }
The src file is on the local disk . Add it to FS at the given dst name removing the source afterwards .
31,959
public void copyFromLocalFile ( boolean delSrc , boolean overwrite , boolean validate , Path src , Path dst ) throws IOException { Configuration conf = getConf ( ) ; FileUtil . copy ( getLocal ( conf ) , src , this , dst , delSrc , overwrite , validate , conf ) ; }
copy a file from local to a file in this file system
31,960
public void copyToLocalFile ( Path src , Path dst ) throws IOException { copyToLocalFile ( false , false , src , dst ) ; }
The src file is under FS and the dst is on the local disk . Copy it from FS control to the local dst name .
31,961
public void moveToLocalFile ( Path src , Path dst ) throws IOException { copyToLocalFile ( true , false , src , dst ) ; }
The src file is under FS and the dst is on the local disk . Copy it from FS control to the local dst name . Remove the source afterwards
31,962
public void copyToLocalFile ( boolean delSrc , boolean validate , Path src , Path dst ) throws IOException { FileUtil . copy ( this , src , getLocal ( getConf ( ) ) , dst , delSrc , validate , getConf ( ) ) ; }
Copy a file from this file system to local
31,963
public long getUsed ( ) throws IOException { long used = 0 ; FileStatus [ ] files = listStatus ( new Path ( "/" ) ) ; for ( FileStatus file : files ) { used += file . getLen ( ) ; } return used ; }
Return the total size of all files in the filesystem .
31,964
private FileStatus [ ] getFileStatus ( Path [ ] paths ) throws IOException { if ( paths == null ) { return null ; } ArrayList < FileStatus > results = new ArrayList < FileStatus > ( paths . length ) ; for ( int i = 0 ; i < paths . length ; i ++ ) { try { results . add ( getFileStatus ( paths [ i ] ) ) ; } catch ( FileNotFoundException e ) { } } return results . toArray ( new FileStatus [ results . size ( ) ] ) ; }
Return a list of file status objects that corresponds to the list of paths excluding those non - existent paths .
31,965
public static synchronized Map < String , Statistics > getStatistics ( ) { Map < String , Statistics > result = new HashMap < String , Statistics > ( ) ; for ( Statistics stat : statisticsTable . values ( ) ) { result . put ( stat . getScheme ( ) , stat ) ; } return result ; }
Get the Map of Statistics object indexed by URI Scheme .
31,966
public static synchronized Statistics getStatistics ( String scheme , Class < ? extends FileSystem > cls ) { Statistics result = statisticsTable . get ( cls ) ; if ( result == null ) { result = new Statistics ( scheme ) ; statisticsTable . put ( cls , result ) ; } return result ; }
Get the statistics for a particular file system
31,967
static NamespaceNotification createNotification ( FSEditLogOp op ) { switch ( op . opCode ) { case OP_ADD : return new NamespaceNotification ( ( ( AddOp ) op ) . path , EventType . FILE_ADDED . getByteValue ( ) , op . getTransactionId ( ) ) ; case OP_CLOSE : return new NamespaceNotification ( ( ( CloseOp ) op ) . path , EventType . FILE_CLOSED . getByteValue ( ) , op . getTransactionId ( ) ) ; case OP_DELETE : return new NamespaceNotification ( ( ( DeleteOp ) op ) . path , EventType . NODE_DELETED . getByteValue ( ) , op . getTransactionId ( ) ) ; case OP_MKDIR : return new NamespaceNotification ( ( ( MkdirOp ) op ) . path , EventType . DIR_ADDED . getByteValue ( ) , op . getTransactionId ( ) ) ; default : return null ; } }
Converts a Transaction Log operation into a NamespaceNotification object .
31,968
static boolean shouldSkipOp ( long currentTransactionId , FSEditLogOp op ) { if ( currentTransactionId == - 1 || op . getTransactionId ( ) > currentTransactionId ) { return false ; } return true ; }
We would skip the transaction if its id is less than or equal to current transaction id .
31,969
static long checkTransactionId ( long currentTransactionId , FSEditLogOp op ) throws IOException { if ( currentTransactionId != - 1 ) { if ( op . getTransactionId ( ) != currentTransactionId + 1 ) { LOG . error ( "Read invalid txId=" + op . getTransactionId ( ) + " expectedTxId=" + ( currentTransactionId + 1 ) + ":" + op ) ; throw new IOException ( "checkTransactionId failed" ) ; } } currentTransactionId = op . getTransactionId ( ) ; return currentTransactionId ; }
Asserts the read operation is the expected one .
31,970
public static void writeString ( DataOutput out , String s ) throws IOException { if ( s != null ) { Text text = new Text ( s ) ; byte [ ] buffer = text . getBytes ( ) ; int len = text . getLength ( ) ; writeVInt ( out , len ) ; out . write ( buffer , 0 , len ) ; } else { writeVInt ( out , - 1 ) ; } }
Write a String as a VInt n followed by n Bytes as in Text format .
31,971
public static String readString ( DataInput in ) throws IOException { int length = readVInt ( in ) ; if ( length == - 1 ) return null ; byte [ ] buffer = new byte [ length ] ; in . readFully ( buffer ) ; return Text . decode ( buffer ) ; }
Read a String as a VInt n followed by n Bytes in Text format .
31,972
static Node parse ( String expr , JobConf job ) throws IOException { if ( null == expr ) { throw new IOException ( "Expression is null" ) ; } Class < ? extends WritableComparator > cmpcl = job . getClass ( "mapred.join.keycomparator" , null , WritableComparator . class ) ; Lexer lex = new Lexer ( expr ) ; Stack < Token > st = new Stack < Token > ( ) ; Token tok ; while ( ( tok = lex . next ( ) ) != null ) { if ( TType . RPAREN . equals ( tok . getType ( ) ) ) { st . push ( reduce ( st , job ) ) ; } else { st . push ( tok ) ; } } if ( st . size ( ) == 1 && TType . CIF . equals ( st . peek ( ) . getType ( ) ) ) { Node ret = st . pop ( ) . getNode ( ) ; if ( cmpcl != null ) { ret . setKeyComparator ( cmpcl ) ; } return ret ; } throw new IOException ( "Missing ')'" ) ; }
Given an expression and an optional comparator build a tree of InputFormats using the comparator to sort keys .
31,973
private synchronized void addBookKeeperEntry ( byte [ ] buf , int off , int len ) throws IOException { try { ledger . addEntry ( buf , off , len ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Last add pushed to ledger " + ledger . getId ( ) + " is " + ledger . getLastAddPushed ( ) ) ; LOG . debug ( "Last add confirmed to ledger " + ledger . getId ( ) + " is " + ledger . getLastAddConfirmed ( ) ) ; } } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; throw new IOException ( "Interrupted writing to BookKeeper" , e ) ; } catch ( BKException e ) { throw new IOException ( "Failed to write to BookKeeper" , e ) ; } }
Write the buffer to a new entry in a BookKeeper ledger or throw an IOException if we are unable to successfully write to a quorum of bookies
31,974
public void write ( byte [ ] buf , int off , int len ) throws IOException { addBookKeeperEntry ( buf , off , len ) ; }
Write the buffer to a new entry in a BookKeeper ledger .
31,975
private void initStandbyFS ( ) { lastStandbyFSInit = System . currentTimeMillis ( ) ; try { if ( standbyFS != null ) { standbyFS . close ( ) ; } LOG . info ( "DAFS initializing standbyFS" ) ; LOG . info ( "DAFS primary=" + primaryURI . toString ( ) + " standby=" + standbyURI . toString ( ) ) ; standbyFS = new StandbyFS ( ) ; standbyFS . initialize ( standbyURI , conf ) ; } catch ( Exception e ) { LOG . info ( "DAFS cannot initialize standbyFS: " + StringUtils . stringifyException ( e ) ) ; standbyFS = null ; } }
Try to initialize standbyFS . Must hold writelock to call this method .
31,976
public static DataChecksum newDataChecksum ( int type , int bytesPerChecksum , Checksum sum ) { if ( bytesPerChecksum <= 0 ) { return null ; } int checksumSize = getChecksumSizeByType ( type ) ; switch ( type ) { case CHECKSUM_NULL : return new DataChecksum ( CHECKSUM_NULL , new ChecksumNull ( ) , checksumSize , bytesPerChecksum ) ; case CHECKSUM_CRC32 : return new DataChecksum ( CHECKSUM_CRC32 , sum , checksumSize , bytesPerChecksum ) ; default : return null ; } }
This constructor uses the specified summer instance
31,977
public void verifyChunkedSums ( ByteBuffer data , ByteBuffer checksums , String fileName , long basePos ) throws ChecksumException { if ( size == 0 ) return ; if ( data . isDirect ( ) && checksums . isDirect ( ) && NativeCrc32 . isAvailable ( ) ) { NativeCrc32 . verifyChunkedSums ( bytesPerChecksum , type , checksums , data , fileName , basePos ) ; return ; } if ( data . hasArray ( ) && checksums . hasArray ( ) ) { verifyChunkedSums ( data . array ( ) , data . arrayOffset ( ) + data . position ( ) , data . remaining ( ) , checksums . array ( ) , checksums . arrayOffset ( ) + checksums . position ( ) , fileName , basePos ) ; return ; } int startDataPos = data . position ( ) ; data . mark ( ) ; checksums . mark ( ) ; try { byte [ ] buf = new byte [ bytesPerChecksum ] ; byte [ ] sum = new byte [ size ] ; while ( data . remaining ( ) > 0 ) { int n = Math . min ( data . remaining ( ) , bytesPerChecksum ) ; checksums . get ( sum ) ; data . get ( buf , 0 , n ) ; summer . reset ( ) ; summer . update ( buf , 0 , n ) ; int calculated = ( int ) summer . getValue ( ) ; int stored = ( sum [ 0 ] << 24 & 0xff000000 ) | ( sum [ 1 ] << 16 & 0xff0000 ) | ( sum [ 2 ] << 8 & 0xff00 ) | sum [ 3 ] & 0xff ; if ( calculated != stored ) { long errPos = basePos + data . position ( ) - startDataPos - n ; throw new ChecksumException ( "Checksum error: " + fileName + " at " + errPos + " exp: " + stored + " got: " + calculated , errPos ) ; } } } finally { data . reset ( ) ; checksums . reset ( ) ; } }
Verify that the given checksums match the given data .
31,978
private void verifyChunkedSums ( byte [ ] data , int dataOff , int dataLen , byte [ ] checksums , int checksumsOff , String fileName , long basePos ) throws ChecksumException { int remaining = dataLen ; int dataPos = 0 ; while ( remaining > 0 ) { int n = Math . min ( remaining , bytesPerChecksum ) ; summer . reset ( ) ; summer . update ( data , dataOff + dataPos , n ) ; dataPos += n ; remaining -= n ; int calculated = ( int ) summer . getValue ( ) ; int stored = ( checksums [ checksumsOff ] << 24 & 0xff000000 ) | ( checksums [ checksumsOff + 1 ] << 16 & 0xff0000 ) | ( checksums [ checksumsOff + 2 ] << 8 & 0xff00 ) | checksums [ checksumsOff + 3 ] & 0xff ; checksumsOff += 4 ; if ( calculated != stored ) { long errPos = basePos + dataPos - n ; throw new ChecksumException ( "Checksum error: " + fileName + " at " + errPos + " exp: " + stored + " got: " + calculated , errPos ) ; } } }
Implementation of chunked verification specifically on byte arrays . This is to avoid the copy when dealing with ByteBuffers that have array backing .
31,979
public void calculateChunkedSums ( ByteBuffer data , ByteBuffer checksums ) { if ( size == 0 ) return ; if ( data . hasArray ( ) && checksums . hasArray ( ) ) { calculateChunkedSums ( data . array ( ) , data . arrayOffset ( ) + data . position ( ) , data . remaining ( ) , checksums . array ( ) , checksums . arrayOffset ( ) + checksums . position ( ) ) ; return ; } data . mark ( ) ; checksums . mark ( ) ; try { byte [ ] buf = new byte [ bytesPerChecksum ] ; while ( data . remaining ( ) > 0 ) { int n = Math . min ( data . remaining ( ) , bytesPerChecksum ) ; data . get ( buf , 0 , n ) ; summer . reset ( ) ; summer . update ( buf , 0 , n ) ; checksums . putInt ( ( int ) summer . getValue ( ) ) ; } } finally { data . reset ( ) ; checksums . reset ( ) ; } }
Calculate checksums for the given data .
31,980
private void calculateChunkedSums ( byte [ ] data , int dataOffset , int dataLength , byte [ ] sums , int sumsOffset ) { int remaining = dataLength ; while ( remaining > 0 ) { int n = Math . min ( remaining , bytesPerChecksum ) ; summer . reset ( ) ; summer . update ( data , dataOffset , n ) ; dataOffset += n ; remaining -= n ; long calculated = summer . getValue ( ) ; sums [ sumsOffset ++ ] = ( byte ) ( calculated >> 24 ) ; sums [ sumsOffset ++ ] = ( byte ) ( calculated >> 16 ) ; sums [ sumsOffset ++ ] = ( byte ) ( calculated >> 8 ) ; sums [ sumsOffset ++ ] = ( byte ) ( calculated ) ; } }
Implementation of chunked calculation specifically on byte arrays . This is to avoid the copy when dealing with ByteBuffers that have array backing .
31,981
public synchronized void shutdown ( ) { LOG . info ( "Shutting down all AsyncDiskService threads..." ) ; for ( Map . Entry < String , ThreadPoolExecutor > e : executors . entrySet ( ) ) { e . getValue ( ) . shutdown ( ) ; } }
Gracefully start the shut down of all ThreadPools .
31,982
public synchronized List < Runnable > shutdownNow ( ) { LOG . info ( "Shutting down all AsyncDiskService threads immediately..." ) ; List < Runnable > list = new ArrayList < Runnable > ( ) ; for ( Map . Entry < String , ThreadPoolExecutor > e : executors . entrySet ( ) ) { list . addAll ( e . getValue ( ) . shutdownNow ( ) ) ; } return list ; }
Shut down all ThreadPools immediately .
31,983
public synchronized static void clearObsoleteImageUploads ( long minTxIdToKeep , String journalId ) { for ( Iterator < Map . Entry < Long , SessionDescriptor > > it = sessions . entrySet ( ) . iterator ( ) ; it . hasNext ( ) ; ) { Map . Entry < Long , SessionDescriptor > entry = it . next ( ) ; if ( entry . getValue ( ) . journalId . equals ( journalId ) && entry . getValue ( ) . txid < minTxIdToKeep ) { it . remove ( ) ; } } }
Clean the map from obsolete images .
31,984
private static Journal getStorage ( ServletContext context , UploadImageParam params ) throws IOException { final Journal journal = JournalNodeHttpServer . getJournalFromContextIfExists ( context , params . journalId ) ; if ( journal == null ) { throwIOException ( "Journal: " + params . journalId + " does not exist" ) ; } final JNStorage storage = journal . getImageStorage ( ) ; storage . checkConsistentNamespace ( params . getStorageInfo ( ) ) ; if ( ! journal . checkWriterEpoch ( params . epoch ) ) { throwIOException ( "This is not the active writer" ) ; } return journal ; }
Get storage object from underlying journal node given request parameters . If yournal does not exist an exception will be thrown . If the journal metadata does not match the request metadata we also fail .
31,985
@ SuppressWarnings ( "unchecked" ) protected void reduce ( KEYIN key , Iterable < VALUEIN > values , Context context ) throws IOException , InterruptedException { for ( VALUEIN value : values ) { context . write ( ( KEYOUT ) key , ( VALUEOUT ) value ) ; } }
This method is called once for each key . Most applications will define their reduce class by overriding this method . The default implementation is an identity function .
31,986
public static DataOutputStream wrapOutputStream ( OutputStream os , int bufferSize , int writeBufferSize ) { return new DataOutputStream ( new BufferedOutputStream ( new BufferedByteOutputStream ( os , bufferSize , writeBufferSize ) ) ) ; }
Wrap given output stream with BufferedByteInputOutput . This is the only way to instantiate the buffered output stream .
31,987
public void close ( ) throws IOException { if ( closed ) { checkWriteThread ( ) ; return ; } try { buffer . close ( ) ; try { writeThread . join ( ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } finally { checkWriteThread ( ) ; } }
Close the output stream . Joins the thread and closes the underlying output stream .
31,988
public void flush ( ) throws IOException { checkError ( ) ; long totalBytesWritten = buffer . totalWritten ( ) ; buffer . unblockReads ( ) ; while ( writeThread . totalBytesTransferred < totalBytesWritten ) { BufferedByteInputOutput . sleep ( 1 ) ; } InjectionHandler . processEvent ( InjectionEventCore . BUFFEREDBYTEOUTPUTSTREAM_FLUSH , writeThread . totalBytesTransferred ) ; checkError ( ) ; buffer . blockReads ( ) ; underlyingOutputStream . flush ( ) ; }
Waits until the buffer has been written to underlying stream . Flushes the underlying stream .
31,989
private void validate ( File root , Collection < URI > dirs ) throws IOException { if ( dirs == null ) return ; for ( URI dir : dirs ) { if ( new File ( dir . getPath ( ) ) . getAbsolutePath ( ) . equals ( root . getAbsolutePath ( ) ) ) { return ; } } throwIOException ( "Error. Storage directory: " + root + " is not in the configured list of storage directories: " + dirs ) ; }
For sanity checking that the given storage directory was configured .
31,990
private void validate ( URI location , Collection < URI > dirs ) throws IOException { if ( dirs != null && ! dirs . contains ( location ) ) { throwIOException ( "Error. Location: " + location + " is not in the configured list of storage directories: " + dirs ) ; } }
For sanity checking that the given location was configured .
31,991
public synchronized List < OutputStream > getCheckpointImageOutputStreams ( long imageTxId ) throws IOException { List < OutputStream > list = new ArrayList < OutputStream > ( ) ; for ( ImageManager im : imageManagers ) { list . add ( im . getCheckpointOutputStream ( imageTxId ) ) ; } return list ; }
Get the list of output streams for all underlying image managers given the checkpoint transaction id .
31,992
public synchronized void saveDigestAndRenameCheckpointImage ( long txid , MD5Hash digest ) throws IOException { for ( ImageManager im : imageManagers ) { if ( im . saveDigestAndRenameCheckpointImage ( txid , digest ) ) { im . setImageDisabled ( false ) ; } else { im . setImageDisabled ( true ) ; } } checkImageManagers ( ) ; }
Save digest in all underlying image managers and rename the checkpoint file . This will throw an exception if all image managers fail .
31,993
void checkImageManagers ( ) throws IOException { updateImageMetrics ( ) ; int numAvailable = 0 ; for ( ImageManager im : imageManagers ) { if ( ! im . isImageDisabled ( ) ) { numAvailable ++ ; } } if ( numAvailable == 0 ) { throwIOException ( "No image locations are available" ) ; } }
Check if any image managers are available .
31,994
void updateImageMetrics ( ) { if ( metrics == null ) { return ; } int failedImageDirs = 0 ; for ( ImageManager im : imageManagers ) { if ( im . isImageDisabled ( ) ) { failedImageDirs ++ ; } } metrics . imagesFailed . set ( failedImageDirs ) ; }
Count available image managers and update namenode metrics .
31,995
public void transitionNonFileImages ( StorageInfo nsInfo , boolean checkEmpty , Transition transition , StartupOption startOpt ) throws IOException { for ( ImageManager im : imageManagers ) { if ( ! ( im instanceof FileImageManager ) ) { if ( checkEmpty && im . hasSomeImageData ( ) ) { LOG . warn ( "Image " + im + " is not empty." ) ; continue ; } LOG . info ( transition + " : " + im ) ; im . transitionImage ( nsInfo , transition , startOpt ) ; } } }
Format the non - file images .
31,996
List < ImageManager > getNonFileImageManagers ( ) { List < ImageManager > nonFile = new ArrayList < ImageManager > ( ) ; for ( ImageManager im : imageManagers ) { if ( ! ( im instanceof FileImageManager ) ) { nonFile . add ( im ) ; } } return nonFile ; }
Get the list of non - file image managers .
31,997
public static List < OutputStream > convertFilesToStreams ( File [ ] localPaths , Storage dstStorage , String str ) throws IOException { List < OutputStream > outputStreams = new ArrayList < OutputStream > ( ) ; if ( localPaths != null ) { for ( File f : localPaths ) { try { if ( f . exists ( ) ) { LOG . warn ( "Overwriting existing file " + f + " with file downloaded form " + str ) ; } outputStreams . add ( new FileOutputStream ( f ) ) ; } catch ( IOException ioe ) { LOG . warn ( "Unable to download file " + f , ioe ) ; if ( dstStorage != null && ( dstStorage instanceof StorageErrorReporter ) ) { ( ( StorageErrorReporter ) dstStorage ) . reportErrorOnFile ( f ) ; } } } if ( outputStreams . isEmpty ( ) ) { throw new IOException ( "Unable to download to any storage directory" ) ; } } return outputStreams ; }
Convert given list of files to a list of output streams .
31,998
private static ThriftServerConfig getServerConfig ( Configuration conf ) { ThriftServerConfig serverConfig = new ThriftServerConfig ( ) ; serverConfig . setPort ( conf . getInt ( StorageServiceConfigKeys . PROXY_THRIFT_PORT_KEY , StorageServiceConfigKeys . PROXY_THRIFT_PORT_DEFAULT ) ) ; return serverConfig ; }
Translates Hadoop s Configuration into Thrift s server config
31,999
public EventRecord query ( String s ) throws Exception { StringBuffer sb ; sb = Environment . runCommand ( "cat sensors.out" ) ; EventRecord retval = new EventRecord ( InetAddress . getLocalHost ( ) . getCanonicalHostName ( ) , InetAddress . getAllByName ( InetAddress . getLocalHost ( ) . getHostName ( ) ) , Calendar . getInstance ( ) , "lm-sensors" , "Unknown" , "sensors -A" , "-" ) ; readGroup ( retval , sb , "fan" ) ; readGroup ( retval , sb , "in" ) ; readGroup ( retval , sb , "temp" ) ; readGroup ( retval , sb , "Core" ) ; return retval ; }
Reads and parses the output of the sensors command and creates an appropriate EventRecord that holds the desirable information .