idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,500
protected void setFields ( Properties props , StorageDirectory sd ) throws IOException { super . setFields ( props , sd ) ; boolean uState = getDistributedUpgradeState ( ) ; int uVersion = getDistributedUpgradeVersion ( ) ; if ( uState && uVersion != getLayoutVersion ( ) ) { props . setProperty ( "distributedUpgradeSta...
Write version file into the storage directory .
33,501
/**
 * Pull any properties out of the VERSION file that come from older versions
 * of HDFS and are only needed while an upgrade is in progress.
 */
private void setDeprecatedPropertiesForUpgrade(Properties props) {
  deprecatedProperties = new HashMap<String, String>();
  final String digest = props.getProperty(MESSAGE_DIGEST_PROPERTY);
  if (digest != null) {
    deprecatedProperties.put(MESSAGE_DIGEST_PROPERTY, digest);
  }
}
Pull any properties out of the VERSION file that are from older versions of HDFS and only necessary during upgrade .
33,502
/**
 * Locate the first readable finalized edits file covering the transaction
 * range [startTxId, endTxId] across the EDITS storage directories.
 *
 * @throws IOException if no such file exists
 */
File findFinalizedEditsFile(long startTxId, long endTxId) throws IOException {
  String fileName = getFinalizedEditsFileName(startTxId, endTxId);
  File editsFile = findFile(NameNodeDirType.EDITS, fileName);
  if (editsFile != null) {
    return editsFile;
  }
  throw new IOException("No edits file for txid " + startTxId + "-" + endTxId + " exists!");
}
Return the first readable finalized edits file for the given txid .
33,503
/**
 * Locate the first readable in-progress edits file starting at the given
 * transaction id across the EDITS storage directories.
 *
 * @throws IOException if no such file exists
 */
File findInProgressEditsFile(long startTxId) throws IOException {
  String fileName = getInProgressEditsFileName(startTxId);
  File editsFile = findFile(NameNodeDirType.EDITS, fileName);
  if (editsFile != null) {
    return editsFile;
  }
  throw new IOException("No edits file for txid " + startTxId + "-in progress");
}
Return the first readable inprogress edits file for the given txid .
33,504
/**
 * Return the first readable storage file with the given name across the
 * current directories of all storage dirs of the given type, or null if
 * no such file exists.
 */
private File findFile(NameNodeDirType dirType, String name) {
  for (StorageDirectory sd : dirIterable(dirType)) {
    File currentDir = sd.getCurrentDir();
    File candidate = new File(currentDir, name);
    // Both the directory must be readable and the file must exist.
    if (currentDir.canRead() && candidate.exists()) {
      return candidate;
    }
  }
  return null;
}
Return the first readable storage file of the given name across any of the current directories in SDs of the given type or null if no such file exists .
33,505
/**
 * Checks whether the given directory is preferred for the given location type.
 * Directories without NNStorageDirectory type information are treated as
 * preferred by default.
 */
static boolean isPreferred(StorageLocationType type, StorageDirectory sd) {
  if (!(sd instanceof NNStorageDirectory)) {
    // No type information available: treat the directory as preferred.
    return true;
  }
  return ((NNStorageDirectory) sd).type == type;
}
Checks if we have information about this directory that it is preferred .
33,506
/**
 * Get the storage location type of the given directory. Directories without
 * NNStorageDirectory type information default to LOCAL.
 */
static StorageLocationType getType(StorageDirectory sd) {
  if (!(sd instanceof NNStorageDirectory)) {
    return StorageLocationType.LOCAL;
  }
  return ((NNStorageDirectory) sd).type;
}
Get the type of given directory .
33,507
void verifyDistributedUpgradeProgress ( StartupOption startOpt ) throws IOException { if ( startOpt == StartupOption . ROLLBACK || startOpt == StartupOption . IMPORT ) return ; assert upgradeManager != null : "FSNameSystem.upgradeManager is null." ; if ( startOpt != StartupOption . UPGRADE ) { if ( upgradeManager . get...
Verify that the distributed upgrade state is valid .
33,508
/**
 * Initialize a distributed upgrade. If the upgrade manager reports there is
 * nothing to initialize, this is a no-op; otherwise the new state is
 * persisted to every storage directory.
 */
void initializeDistributedUpgrade() throws IOException {
  if (upgradeManager.initializeUpgrade()) {
    // Persist the upgrade state into the VERSION files.
    writeAll();
    LOG.info("\n Distributed upgrade for NameNode version "
        + upgradeManager.getUpgradeVersion()
        + " to current LV " + layoutVersion + " is initialized.");
  }
}
Initialize a distributed upgrade .
33,509
synchronized void reportErrorsOnDirectories ( List < StorageDirectory > sds , FSImage image ) throws IOException { for ( StorageDirectory sd : sds ) { reportErrorsOnDirectory ( sd , image ) ; } if ( image != null ) { image . checkImageManagers ( ) ; } if ( ! sds . isEmpty ( ) ) { if ( this . getNumStorageDirs ( ) == 0 ...
Marks a list of directories as having experienced an error .
33,510
synchronized void reportErrorsOnDirectory ( StorageDirectory sd , FSImage image ) { String lsd = listStorageDirectories ( ) ; LOG . info ( "reportErrorsOnDirectory: Current list of storage dirs:" + lsd ) ; LOG . error ( "reportErrorsOnDirectory: Error reported on storage directory " + sd . getRoot ( ) ) ; if ( this . s...
Reports that a directory has experienced an error . Notifies listeners that the directory is no longer available .
33,511
public void reportErrorOnFile ( File f ) { String absPath = f . getAbsolutePath ( ) ; for ( StorageDirectory sd : storageDirs ) { String dirPath = sd . getRoot ( ) . getAbsolutePath ( ) ; if ( ! dirPath . endsWith ( "/" ) ) { dirPath += "/" ; } if ( absPath . startsWith ( dirPath ) ) { reportErrorsOnDirectory ( sd , nu...
Report that an IOE has occurred on some file which may or may not be within one of the NN image storage directories .
33,512
/**
 * Iterate over all current storage directories, inspecting each with the
 * given inspector.
 */
void inspectStorageDirs(FSImageStorageInspector inspector) throws IOException {
  Iterator<StorageDirectory> dirs = dirIterator();
  while (dirs.hasNext()) {
    inspector.inspectDirectory(dirs.next());
  }
}
Iterate over all current storage directories inspecting them with the given inspector .
33,513
FSImageStorageInspector readAndInspectDirs ( ) throws IOException { int minLayoutVersion = Integer . MAX_VALUE ; int maxLayoutVersion = Integer . MIN_VALUE ; for ( Iterator < StorageDirectory > it = dirIterator ( ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; if ( ! sd . getVersionFile ( ) . exists ( ...
Iterate over all of the storage dirs reading their contents to determine their layout versions . Returns an FSImageStorageInspector which has inspected each directory .
33,514
public static boolean isAvailable ( ) { try { String osName = System . getProperty ( "os.name" ) ; if ( ! osName . startsWith ( "Linux" ) ) { LOG . info ( "ProcfsBasedProcessTree currently is supported only on " + "Linux." ) ; return false ; } } catch ( SecurityException se ) { LOG . warn ( "Failed to get Operating Sys...
Checks if the ProcfsBasedProcessTree is available on this system .
33,515
public ProcfsBasedProcessTree getProcessTree ( ) { if ( pid != - 1 ) { List < Integer > processList = getProcessList ( ) ; Map < Integer , ProcessInfo > allProcessInfo = new HashMap < Integer , ProcessInfo > ( ) ; Map < Integer , ProcessInfo > oldProcs = new HashMap < Integer , ProcessInfo > ( processTree ) ; processTr...
Get the process tree with the latest state. If the root process is not alive, an empty tree will be returned.
33,516
/**
 * @return true if at least one process in the process tree is still alive
 */
public boolean isAnyProcessInTreeAlive() {
  boolean foundLive = false;
  for (Integer processId : processTree.keySet()) {
    if (isAlive(processId.toString())) {
      foundLive = true;
      break;
    }
  }
  return foundLive;
}
Are any of the subprocesses in the process tree alive?
33,517
static boolean checkPidPgrpidForMatch ( String pidStr , String procfsDir ) { Integer pId = Integer . parseInt ( pidStr ) ; ProcessInfo pInfo = new ProcessInfo ( pId ) ; pInfo = constructProcessInfo ( pInfo , procfsDir ) ; if ( pInfo == null ) { return true ; } if ( ! pInfo . getPgrpId ( ) . equals ( pId ) ) { LOG . war...
Verify that the given process id is same as its process group id .
33,518
public static void assertAndDestroyProcessGroup ( String pgrpId , long interval , boolean inBackground ) throws IOException { if ( ! checkPidPgrpidForMatch ( pgrpId , PROCFS ) ) { throw new IOException ( "Process with PID " + pgrpId + " is not a process group leader." ) ; } destroyProcessGroup ( pgrpId , interval , inB...
Make sure that the given pid is a process group leader and then destroy the process group .
33,519
public String getProcessTreeDump ( ) { StringBuilder ret = new StringBuilder ( ) ; ret . append ( String . format ( "\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n" ) ) ; for ( ProcessInfo p : processTree . values ( ) ) { i...
Get a dump of the process - tree .
33,520
public Collection < String > getProcessNameContainsCount ( String name ) { List < String > retProcessList = new ArrayList < String > ( ) ; List < Integer > processList = getProcessList ( ) ; for ( Integer proc : processList ) { ProcessInfo p = new ProcessInfo ( proc ) ; if ( constructProcessInfo ( p , procfsDir ) != nu...
Get a count of the number of processes that have a commandline that matches a name .
33,521
/**
 * Get the cumulative virtual memory used by all processes in the process
 * tree that are strictly older than the passed-in age.
 *
 * @param olderThanAge only count processes whose age exceeds this value
 * @return total virtual memory in use, in bytes (per ProcessInfo.getVmem)
 */
public long getCumulativeVmem(int olderThanAge) {
  long totalVmem = 0;
  for (ProcessInfo proc : processTree.values()) {
    if (proc == null || proc.getAge() <= olderThanAge) {
      continue; // skip missing entries and too-young processes
    }
    totalVmem += proc.getVmem();
  }
  return totalVmem;
}
Get the cumulative virtual memory used by all the processes in the process - tree that are older than the passed in age .
33,522
public long getCumulativeCpuTime ( ) { if ( JIFFY_LENGTH_IN_MILLIS < 0 ) { return 0 ; } long incJiffies = 0 ; for ( ProcessInfo p : processTree . values ( ) ) { if ( p != null ) { incJiffies += p . dtime ; } } if ( incJiffies * JIFFY_LENGTH_IN_MILLIS < Integer . MAX_VALUE ) { cpuTime += incJiffies * JIFFY_LENGTH_IN_MIL...
Get the CPU time in millisecond used by all the processes in the process - tree since the process - tree created
33,523
private List < Integer > getProcessList ( ) { String [ ] processDirs = ( new File ( procfsDir ) ) . list ( ) ; List < Integer > processList = new ArrayList < Integer > ( ) ; for ( String dir : processDirs ) { try { int pd = Integer . parseInt ( dir ) ; if ( ( new File ( procfsDir , dir ) ) . isDirectory ( ) ) { process...
Get the list of all processes in the system .
33,524
private static ProcessInfo constructProcessInfo ( ProcessInfo pinfo , String procfsDir ) { ProcessInfo ret = null ; BufferedReader in = null ; FileReader fReader = null ; try { File pidDir = new File ( procfsDir , String . valueOf ( pinfo . getPid ( ) ) ) ; fReader = new FileReader ( new File ( pidDir , PROCFS_STAT_FIL...
Construct the ProcessInfo using the process PID and procfs rooted at the specified directory and return the same . It is provided mainly to assist testing purposes .
33,525
private static void parseOptions ( String args [ ] ) { if ( ! ( args . length == 14 || args . length == 12 || args . length == 5 ) ) { usage ( ) ; } for ( int i = 0 ; i < args . length ; i ++ ) { if ( args [ i ] . equals ( "-s" ) && i + 1 < args . length ) { dfsServer_ = args [ i + 1 ] ; System . out . println ( args [...
Parses command line options . the hostName processName are used to namespace the files created by this instance of the benchmark .
33,526
private static void usage ( ) { String className = MStress_Client . class . getName ( ) ; System . out . printf ( "Usage: java %s -s dfs-server -p dfs-port" + "[-t [create|stat|read|readdir|delete|rename] -a planfile-path -c host -n process-name" + " -P prefix]\n" , className ) ; System . out . printf ( " -t: this op...
Prints usage information to standard out .
33,527
private static int parsePlanFile ( ) { int ret = - 1 ; try { FileInputStream fis = new FileInputStream ( planfilePath_ ) ; DataInputStream dis = new DataInputStream ( fis ) ; BufferedReader br = new BufferedReader ( new InputStreamReader ( dis ) ) ; if ( prefix_ . isEmpty ( ) ) { prefix_ = "PATH_PREFIX_" ; } String lin...
Parses the plan file that contains parameters for the benchmark .
33,528
private static int CreateDFSPaths ( int level , String parentPath ) { Boolean isLeaf = false ; Boolean isDir = false ; if ( level + 1 >= levels_ ) { isLeaf = true ; } if ( isLeaf ) { if ( type_ . equals ( "dir" ) ) { isDir = true ; } else { isDir = false ; } } else { isDir = true ; } for ( int i = 0 ; i < inodesPerLeve...
Recursively creates directories and files .
33,529
private static int createWriteDFSPaths ( ) { if ( createDFSPaths ( ) != 0 ) { return - 1 ; } try { for ( Map . Entry < String , OutputStream > file : files_ . entrySet ( ) ) { OutputStream os = file . getValue ( ) ; long startTime = System . nanoTime ( ) ; os . write ( data_ . getBytes ( ) ) ; timingWrite_ . add ( new ...
This creates DFS paths and writes data_ to them in one go .
33,530
@ SuppressWarnings ( "deprecation" ) private void localizeTaskConfiguration ( TaskTracker tracker , JobConf ttConf , String workDir , Task t , JobID jobID ) throws IOException { Path jobFile = new Path ( t . getJobFile ( ) ) ; FileSystem systemFS = tracker . systemFS ; this . localizedJobFile = new Path ( workDir , job...
Copies the job file to the working directory of the process that will be started .
33,531
/**
 * Delete any temporary map-output files left behind by previous failed
 * attempts of this task.
 *
 * @return false if the superclass preparation failed, true otherwise
 */
public boolean prepare() throws IOException {
  boolean parentPrepared = super.prepare();
  if (parentPrepared) {
    // Clear out map outputs from earlier failed attempts of this task id.
    mapOutputFile.removeAll(getTask().getTaskID());
  }
  return parentPrepared;
}
Delete any temporary files from previous failed attempts .
33,532
protected void addDefaults ( ) { try { Parser . CNode . addIdentifier ( "inner" , InnerJoinRecordReader . class ) ; Parser . CNode . addIdentifier ( "outer" , OuterJoinRecordReader . class ) ; Parser . CNode . addIdentifier ( "override" , OverrideRecordReader . class ) ; Parser . WNode . addIdentifier ( "tbl" , Wrapped...
Adds the default set of identifiers to the parser .
33,533
private void addUserIdentifiers ( JobConf job ) throws IOException { Pattern x = Pattern . compile ( "^mapred\\.join\\.define\\.(\\w+)$" ) ; for ( Map . Entry < String , String > kv : job ) { Matcher m = x . matcher ( kv . getKey ( ) ) ; if ( m . matches ( ) ) { try { Parser . CNode . addIdentifier ( m . group ( 1 ) , ...
Inform the parser of user - defined types .
33,534
/**
 * Build composite splits by delegating to the parsed root node: the ith split
 * from each child is paired into the ith composite split.
 *
 * NOTE(review): setting mapred.min.split.size to Long.MAX_VALUE presumably
 * forces each child format to produce the coarsest possible splits so child
 * splits stay aligned — confirm against the child InputFormats.
 */
public InputSplit [ ] getSplits ( JobConf job , int numSplits ) throws IOException { setFormat ( job ) ; job . setLong ( "mapred.min.split.size" , Long . MAX_VALUE ) ; return root . getSplits ( job , numSplits ) ; }
Build a CompositeInputSplit from the child InputFormats by assigning the ith split from each child to the ith composite split .
33,535
/**
 * Construct a record reader for the children of this InputFormat, as defined
 * in the init expression, by delegating to the parsed root node after
 * (re)applying the configured format.
 */
@ SuppressWarnings ( "unchecked" ) public ComposableRecordReader < K , TupleWritable > getRecordReader ( InputSplit split , JobConf job , Reporter reporter ) throws IOException { setFormat ( job ) ; return root . getRecordReader ( split , job , reporter ) ; }
Construct a CompositeRecordReader for the children of this InputFormat, as defined in the init expression. The outermost join need only be composable, not necessarily a composite. Mandating TupleWritable isn't strictly correct.
33,536
public static void main ( String [ ] args ) { if ( args . length == 3 && "-getlevel" . equals ( args [ 0 ] ) ) { process ( "http://" + args [ 1 ] + "/logLevel?log=" + args [ 2 ] ) ; return ; } else if ( args . length == 4 && "-setlevel" . equals ( args [ 0 ] ) ) { process ( "http://" + args [ 1 ] + "/logLevel?log=" + a...
A command line implementation
33,537
/**
 * Look up the FileStatus for a path in the S3 store.
 *
 * @throws FileNotFoundException if no INode exists for the path
 */
public FileStatus getFileStatus(Path f) throws IOException {
  Path absolute = makeAbsolute(f);
  INode inode = store.retrieveINode(absolute);
  if (inode != null) {
    return new S3FileStatus(f.makeQualified(this), inode);
  }
  throw new FileNotFoundException(f + ": No such file or directory.");
}
FileStatus for S3 file systems .
33,538
/**
 * Assigns a client to this dispatcher. The addition is staged in
 * newlyAssignedClients under clientModificationsLock; updateClients()
 * later folds staged additions/removals into the active set.
 */
public void assignClient ( long clientId ) { LOG . info ( "Assigning client " + clientId + " ..." ) ; synchronized ( clientModificationsLock ) { newlyAssignedClients . add ( clientId ) ; } }
Assigns a client to this dispatcher . If a notification fails to be sent to a client then it will be placed in a queue and the assigned dispatcher for each client will try to re - send notifications from that queue .
33,539
public void handleFailedDispatch ( long clientId , long failedTime ) { ClientData clientData = core . getClientData ( clientId ) ; if ( failedTime == - 1 || clientData == null ) return ; if ( clientData . markedAsFailedTime == - 1 ) { clientData . markedAsFailedTime = failedTime ; LOG . info ( "Marked client " + client...
Called each time a handleNotification or heartbeat Thrift call fails .
33,540
public void handleSuccessfulDispatch ( long clientId , long sentTime ) { ClientData clientData = core . getClientData ( clientId ) ; if ( sentTime == - 1 || clientData == null ) return ; clientData . markedAsFailedTime = - 1 ; if ( clientData . markedAsFailedTime != - 1 ) { LOG . info ( "Unmarking " + clientId + " at "...
Called each time a handleNotification or heartbeat Thrift call is successful .
33,541
/**
 * Applies staged client changes to the active set: additions are applied
 * before removals, so a client present in both lists ends up removed. Both
 * staging lists are cleared afterwards.
 *
 * Must be called while holding the client-modifications lock.
 */
private void updateClients ( ) { assignedClients . addAll ( newlyAssignedClients ) ; assignedClients . removeAll ( removedClients ) ; newlyAssignedClients . clear ( ) ; removedClients . clear ( ) ; }
Must be called while holding the client-modifications lock.
33,542
private void populateAccess ( ) throws SQLException { PreparedStatement statement = null ; try { statement = connection . prepareStatement ( "INSERT INTO Access(url, referrer, time)" + " VALUES (?, ?, ?)" ) ; Random random = new Random ( ) ; int time = random . nextInt ( 50 ) + 50 ; final int PROBABILITY_PRECISION = 10...
Populates the Access table with generated records .
33,543
private boolean verify ( ) throws SQLException { String countAccessQuery = "SELECT COUNT(*) FROM Access" ; String sumPageviewQuery = "SELECT SUM(pageview) FROM Pageview" ; Statement st = null ; ResultSet rs = null ; try { st = connection . createStatement ( ) ; rs = st . executeQuery ( countAccessQuery ) ; rs . next ( ...
Verifies the results are correct
33,544
/**
 * Get the meta information of the replica matching the given block's id and
 * generation stamp, or null when the namespace is unknown.
 */
DatanodeBlockInfo get(int namespaceId, Block block) {
  checkBlock(block);
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  return (namespace == null) ? null : namespace.getBlockInfo(block);
}
Get the meta information of the replica that matches both block id and generation stamp
33,545
/**
 * Remove the replica's meta information matching the input block's id and
 * generation stamp from the map.
 *
 * @return the removed DatanodeBlockInfo, or null if the namespace is unknown
 */
DatanodeBlockInfo remove(int namespaceId, Block block) {
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  if (namespace == null) {
    return null;
  }
  // Keep the dataset delta in sync with the removal when tracking is enabled.
  if (datasetDelta != null) {
    datasetDelta.removeBlock(namespaceId, block);
  }
  return namespace.removeBlockInfo(block);
}
Remove the replica's meta information from the map that matches the input block's id and generation stamp.
33,546
/**
 * Get the number of blocks tracked for the given namespace; 0 when the
 * namespace is unknown.
 */
int size(int namespaceId) {
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  return (namespace == null) ? 0 : namespace.size();
}
Get the size of the map for given namespace
33,547
/**
 * Look up the ActiveFile record for an ongoing create of the given block,
 * or null when the namespace is unknown.
 */
ActiveFile getOngoingCreates(int namespaceId, Block block) {
  checkBlock(block);
  NamespaceMap namespace = getNamespaceMap(namespaceId);
  return (namespace == null) ? null : namespace.getOngoingCreates(block);
}
Get the ActiveFile record for an ongoing create of the given block.
33,548
/**
 * Populates the given JobInfo object from the job's history log file by
 * replaying it through a JobTasksParseListener.
 *
 * @param jobHistoryFile path of the history log to parse
 * @param job            the JobInfo to populate
 * @param fs             filesystem holding the history file
 */
public static void parseJobTasks ( String jobHistoryFile , JobHistory . JobInfo job , FileSystem fs ) throws IOException { JobHistory . parseHistoryFromFS ( jobHistoryFile , new JobTasksParseListener ( job ) , fs ) ; }
Populates a JobInfo object from the job's history log file.
33,549
private void validateAllPolicies ( Collection < PolicyInfo > all ) throws IOException , NumberFormatException { for ( PolicyInfo pinfo : all ) { Path srcPath = pinfo . getSrcPath ( ) ; if ( srcPath == null ) { throw new IOException ( "Unable to find srcPath in policy." ) ; } if ( pinfo . getProperty ( "replication" ) =...
Validate a collection of policies
33,550
private void purgeDirectories ( FileSystem fs , Path root ) throws IOException { DirectoryTraversal traversal = DirectoryTraversal . directoryRetriever ( Arrays . asList ( root ) , fs , directoryTraversalThreads , directoryTraversalShuffle ) ; String prefix = root . toUri ( ) . getPath ( ) ; FileStatus dir ; while ( ( ...
Traverse the parity destination directory removing directories that no longer existing in the source .
33,551
private static boolean existsBetterParityFile ( Codec codec , FileStatus srcStat , Configuration conf ) throws IOException { for ( Codec c : Codec . getCodecs ( ) ) { if ( c . priority > codec . priority ) { ParityFilePair ppair = ParityFilePair . getParityFile ( c , srcStat , conf ) ; if ( ppair != null ) { return tru...
Is there a parity file which has a codec with higher priority?
33,552
public int copyTo ( byte [ ] dest , int start ) throws BufferTooSmallException { if ( size > ( dest . length - start ) ) { throw new BufferTooSmallException ( "size is " + size + ", buffer availabe size is " + ( dest . length - start ) ) ; } if ( size > 0 ) { System . arraycopy ( bytes , 0 , dest , start , size ) ; } r...
copy the byte array to the dest array and return the number of bytes copied .
33,553
/**
 * Change the capacity of the backing storage. Existing data is preserved,
 * truncated to the new capacity if it is smaller than the current size.
 */
public void setCapacity(int new_cap) {
  if (new_cap == getCapacity()) {
    return; // capacity unchanged, nothing to do
  }
  byte[] resized = new byte[new_cap];
  size = Math.min(size, new_cap);
  if (size != 0) {
    System.arraycopy(bytes, 0, resized, 0, size);
  }
  bytes = resized;
}
Change the capacity of the backing storage . The data is preserved .
33,554
/**
 * Set the value to a copy of the given byte range.
 *
 * NOTE(review): setSize(0) before setSize(length) presumably lets the backing
 * array grow without copying the old contents (a capacity grow copies only
 * `size` bytes) — confirm against setSize()/setCapacity().
 *
 * @param newData source array
 * @param offset  index of the first byte to copy
 * @param length  number of bytes to copy
 */
public void set ( byte [ ] newData , int offset , int length ) { setSize ( 0 ) ; setSize ( length ) ; System . arraycopy ( newData , offset , bytes , 0 , size ) ; }
Set the value to a copy of the given byte range
33,555
/**
 * Sets the len-bit field of num starting at bit position start to the low
 * len bits of value, leaving all other bits untouched.
 *
 * @param num   the number whose bits are modified
 * @param start index of the lowest bit of the field (0-based)
 * @param len   width of the field in bits
 * @param value new field contents; only its low len bits are used
 * @return num with the selected bit range replaced
 */
protected static long setBits(long num, int start, int len, long value) {
  final long fieldMask = (1L << len) - 1;            // len low-order one bits
  final long cleared = num & ~(fieldMask << start);  // num with the field zeroed
  return cleared | ((value & fieldMask) << start);
}
Sets specific bits of a specific number .
33,556
private int initFileDirTables ( ) { try { initFileDirTables ( root ) ; } catch ( IOException e ) { System . err . println ( e . getLocalizedMessage ( ) ) ; e . printStackTrace ( ) ; return - 1 ; } if ( dirs . isEmpty ( ) ) { System . err . println ( "The test space " + root + " is empty" ) ; return - 1 ; } if ( files ....
Create a table that contains all directories under root and another table that contains all files under root .
33,557
private void initFileDirTables ( Path path ) throws IOException { FileStatus [ ] stats = fs . listStatus ( path ) ; if ( stats != null ) { for ( FileStatus stat : stats ) { if ( stat . isDir ( ) ) { dirs . add ( stat . getPath ( ) . toString ( ) ) ; initFileDirTables ( stat . getPath ( ) ) ; } else { Path filePath = st...
Create a table that contains all directories under the specified path and another table that contains all files under the specified path and whose name starts with _file_ .
33,558
protected String [ ] getTaskDiagnosticsImpl ( TaskAttemptID taskId ) throws IOException { List < String > taskDiagnosticInfo = null ; JobID jobId = taskId . getJobID ( ) ; TaskID tipId = taskId . getTaskID ( ) ; JobInProgressTraits job = getJobInProgress ( jobId ) ; if ( job != null && job . inited ( ) ) { TaskInProgre...
Get the diagnostics for a given task
33,559
/**
 * Look up the TaskInProgress for the given tip id.
 *
 * @return the TaskInProgress, or null when the owning job is unknown
 */
public TaskInProgress getTip(TaskID tipid) {
  JobInProgressTraits job = getJobInProgress(tipid.getJobID());
  if (job == null) {
    return null;
  }
  return job.getTaskInProgress(tipid);
}
Returns specified TaskInProgress or null .
33,560
/**
 * Remove and return the first element on the linked list of all elements,
 * or null when the list is empty.
 */
public T pollFirst() {
  if (head == null) {
    return null;
  }
  final T first = head.element;
  remove(first);
  return first;
}
Remove and return first element on the linked list of all elements .
33,561
/**
 * Remove the first n elements from the linked list of all elements and
 * append them to the given list. Requesting n >= size drains everything
 * via pollAllToList.
 *
 * NOTE(review): when n >= size there is no early return after
 * pollAllToList(retList); the while loop then terminates only because head
 * is presumably null afterwards, and shrinkIfNecessary() runs again —
 * confirm pollAllToList() leaves head null, otherwise elements would be
 * appended twice.
 */
public void pollNToList ( int n , List < T > retList ) { if ( n >= size ) { pollAllToList ( retList ) ; } while ( n -- > 0 && head != null ) { T curr = head . element ; this . removeElem ( curr ) ; retList . add ( curr ) ; } shrinkIfNecessary ( ) ; }
Remove and return first n elements on the linked list of all elements . Put elements into the given list .
33,562
public static Path getTaskOutputPath ( JobConf conf , String name ) throws IOException { Path outputPath = getOutputPath ( conf ) ; if ( outputPath == null ) { throw new IOException ( "Undefined job output-path" ) ; } OutputCommitter committer = conf . getOutputCommitter ( ) ; Path workPath = outputPath ; TaskAttemptCo...
Helper function to create the task's temporary output directory and return the path to the task's output file.
33,563
public static String getUniqueName ( JobConf conf , String name ) { int partition = conf . getInt ( "mapred.task.partition" , - 1 ) ; if ( partition == - 1 ) { throw new IllegalArgumentException ( "This method can only be called from within a Job" ) ; } String taskType = ( conf . getBoolean ( "mapred.task.is.map" , tru...
Helper function to generate a name that is unique for the task .
33,564
synchronized void openInfo ( ) throws IOException { if ( src == null && blocks == null ) { throw new IOException ( "No file provided to open" ) ; } LocatedBlocks newInfo = src != null ? getLocatedBlocks ( src , 0 , prefetchSize ) : blocks ; if ( newInfo == null ) { throw new FileNotFoundException ( "Cannot open filenam...
Grab the open - file info from namenode
33,565
private Block getBlockInfo ( LocatedBlock locatedblock ) throws IOException { if ( locatedblock == null || locatedblock . getLocations ( ) . length == 0 ) { return null ; } int replicaNotFoundCount = locatedblock . getLocations ( ) . length ; for ( DatanodeInfo datanode : locatedblock . getLocations ( ) ) { ProtocolPro...
Get block info from a datanode
33,566
private LocatedBlock getBlockAt ( long offset , boolean updatePosition , boolean throwWhenNotFound ) throws IOException { assert ( locatedBlocks != null ) : "locatedBlocks is null" ; locatedBlocks . blockLocationInfoExpiresIfNeeded ( ) ; LocatedBlock blk = locatedBlocks . getBlockContainingOffset ( offset ) ; if ( blk ...
Get block at the specified position . Fetch it from the namenode if not cached .
33,567
private List < LocatedBlock > getBlockRange ( final long offset , final long length ) throws IOException { List < LocatedBlock > blockRange = new ArrayList < LocatedBlock > ( ) ; if ( length == 0 ) return blockRange ; int maxLoops = 10000 ; DFSLocatedBlocks locatedBlocks = this . locatedBlocks ; if ( locatedBlocks == n...
Get blocks in the specified range . The locations of all blocks overlapping with the given segment of the file are retrieved . Fetch them from the namenode if not cached .
33,568
public synchronized void close ( ) throws IOException { try { if ( closed ) { return ; } dfsClient . checkOpen ( ) ; if ( blockReader != null ) { closeBlockReader ( blockReader , false ) ; blockReader = null ; } for ( BlockReaderLocalBase brl : localBlockReaders . values ( ) ) { try { brl . close ( ) ; } catch ( IOExce...
Close it down!
33,569
public synchronized int read ( byte buf [ ] , int off , int len ) throws IOException { dfsClient . checkOpen ( ) ; if ( closed ) { dfsClient . incReadExpCntToStats ( ) ; throw new IOException ( "Stream closed" ) ; } DFSClient . dfsInputStreamfailures . set ( 0 ) ; long start = System . currentTimeMillis ( ) ; if ( pos ...
Read the entire buffer .
33,570
private void closeBlockReader ( BlockReader reader , boolean reuseConnection ) throws IOException { if ( reader . hasSentStatusCode ( ) ) { Socket oldSock = reader . takeSocket ( ) ; if ( dfsClient . getDataTransferProtocolVersion ( ) < DataTransferProtocol . READ_REUSE_CONNECTION_VERSION || ! reuseConnection ) { if ( ...
Close the given BlockReader and cache its socket .
33,571
protected BlockReader getBlockReader ( int protocolVersion , int namespaceId , InetSocketAddress dnAddr , String file , long blockId , long generationStamp , long startOffset , long len , int bufferSize , boolean verifyChecksum , String clientName , long bytesToCheckReadSpeed , long minReadSpeedBps , boolean reuseConne...
Retrieve a BlockReader suitable for reading . This method will reuse the cached connection to the DN if appropriate . Otherwise it will create a new connection .
33,572
public int read ( long position , byte [ ] buffer , int offset , int length , ReadOptions options ) throws IOException { dfsClient . checkOpen ( ) ; if ( closed ) { throw new IOException ( "Stream closed" ) ; } DFSClient . dfsInputStreamfailures . set ( 0 ) ; long start = System . currentTimeMillis ( ) ; long filelen =...
Read bytes starting from the specified position .
33,573
public List < ByteBuffer > readFullyScatterGather ( long position , int length ) throws IOException { if ( dfsClient . dataTransferVersion < DataTransferProtocol . SCATTERGATHER_VERSION ) { return super . readFullyScatterGather ( position , length ) ; } dfsClient . checkOpen ( ) ; if ( closed ) { throw new IOException ...
Read bytes starting from the specified position . This is optimized for fast preads from an application with minimum of buffer copies .
33,574
public synchronized void seek ( long targetPos ) throws IOException { if ( targetPos > getFileLength ( ) ) { throw new IOException ( "Cannot seek after EOF" ) ; } boolean done = false ; if ( pos <= targetPos && targetPos <= blockEnd ) { int diff = ( int ) ( targetPos - pos ) ; if ( diff <= DFSClient . TCP_WINDOW_SIZE )...
Seek to a new arbitrary location
33,575
public synchronized boolean seekToNewSource ( long targetPos , boolean throwWhenNotFound ) throws IOException { boolean markedDead = deadNodes . containsKey ( currentNode ) ; addToDeadNodes ( currentNode ) ; DatanodeInfo oldNode = currentNode ; DatanodeInfo newNode = blockSeekTo ( targetPos , throwWhenNotFound ) ; if (...
Seek to given position on a node other than the current node . If a node other than the current node is found then returns true . If another node could not be found then returns false .
33,576
/**
 * Is the given task considered to be running? A task counts as running when
 * it is RUNNING, still UNASSIGNED, or in its cleanup phase.
 */
private boolean isTaskRunning(TaskStatus taskStatus) {
  if (taskStatus.inTaskCleanupPhase()) {
    return true;
  }
  TaskStatus.State runState = taskStatus.getRunState();
  return runState == State.RUNNING || runState == State.UNASSIGNED;
}
Is the given task considered to be running?
33,577
/**
 * Get the number of currently running map tasks reported by this tracker.
 *
 * @return count of map-side TaskStatus entries that are considered running
 */
public int countMapTasks() {
  int mapCount = 0;
  // Enhanced for-loop replaces the raw Iterator with its redundant cast.
  for (TaskStatus ts : taskReports) {
    if (ts.getIsMap() && isTaskRunning(ts)) {
      mapCount++;
    }
  }
  return mapCount;
}
Get the number of running map tasks .
33,578
/**
 * Get the number of map slots occupied by running map tasks.
 *
 * @return sum of slot counts over all running map-side TaskStatus entries
 */
public int countOccupiedMapSlots() {
  int mapSlotsCount = 0;
  // Enhanced for-loop replaces the raw Iterator with its redundant cast.
  for (TaskStatus ts : taskReports) {
    if (ts.getIsMap() && isTaskRunning(ts)) {
      mapSlotsCount += ts.getNumSlots();
    }
  }
  return mapSlotsCount;
}
Get the number of occupied map slots .
33,579
/**
 * Get the number of currently running reduce tasks reported by this tracker.
 *
 * @return count of reduce-side TaskStatus entries that are considered running
 */
public int countReduceTasks() {
  int reduceCount = 0;
  // Enhanced for-loop replaces the raw Iterator with its redundant cast.
  for (TaskStatus ts : taskReports) {
    if (!ts.getIsMap() && isTaskRunning(ts)) {
      reduceCount++;
    }
  }
  return reduceCount;
}
Get the number of running reduce tasks .
33,580
public int countOccupiedReduceSlots ( ) { int reduceSlotsCount = 0 ; for ( Iterator < TaskStatus > it = taskReports . iterator ( ) ; it . hasNext ( ) ; ) { TaskStatus ts = ( TaskStatus ) it . next ( ) ; if ( ( ! ts . getIsMap ( ) ) && isTaskRunning ( ts ) ) { reduceSlotsCount += ts . getNumSlots ( ) ; } } return reduce...
Get the number of occupied reduce slots .
33,581
/**
 * Get a thread-local BinaryRecordInput bound to the supplied DataInput.
 * The returned instance is shared per thread (via the bIn ThreadLocal), so
 * callers must not retain it across calls.
 */
public static BinaryRecordInput get ( DataInput inp ) { BinaryRecordInput bin = ( BinaryRecordInput ) bIn . get ( ) ; bin . setDataInput ( inp ) ; return bin ; }
Get a thread - local record input for the supplied DataInput .
33,582
/**
 * Safe mode can be turned off iff the block threshold has been reached and
 * the extension time has passed.
 *
 * @return true when it is safe to leave safe mode
 */
public boolean canLeave() {
  if (reached == 0) {
    // Threshold not reached yet.
    return false;
  }
  long sinceReached = namesystem.now() - reached;
  if (sinceReached < extension) {
    // Still inside the extension window; report and stay in safe mode.
    reportStatus("STATE* Safe mode ON.", false);
    return false;
  }
  return !needEnter();
}
Safe mode can be turned off iff the threshold is reached and the extension time have passed .
33,583
/**
 * Log the safe-mode status message together with the turn-off tip, at most
 * once every 20 seconds unless rightNow forces immediate output.
 */
private void reportStatus(String msg, boolean rightNow) {
  long curTime = FSNamesystem.now();
  // Throttle to one report per 20 seconds unless forced.
  boolean throttled = !rightNow && (curTime - lastStatusReport < 20 * 1000);
  if (throttled) {
    return;
  }
  FLOG.info(msg + " \n" + getTurnOffTip());
  lastStatusReport = curTime;
}
Print status every 20 seconds .
33,584
private boolean isConsistent ( ) { if ( this . reached < 0 ) { return true ; } if ( namesystem . getTotalBlocks ( ) == - 1 && namesystem . getSafeBlocks ( ) == - 1 ) { return true ; } long activeBlocks = namesystem . getBlocksTotal ( ) - namesystem . getPendingDeletionBlocks ( ) ; return ( namesystem . getTotalBlocks (...
Checks consistency of the class state . This is costly and currently called only in assert .
33,585
/**
 * Start processing the next unique key: first drains any remaining values of
 * the current key (while nextKeyIsSame), then advances to the first value of
 * the new key, incrementing the input-key counter when one is configured.
 *
 * @return true if a new key/value pair was read, false at end of input
 */
public boolean nextKey ( ) throws IOException , InterruptedException { while ( hasMore && nextKeyIsSame ) { nextKeyValue ( ) ; } if ( hasMore ) { if ( inputKeyCounter != null ) { inputKeyCounter . increment ( 1 ) ; } return nextKeyValue ( ) ; } else { return false ; } }
Start processing next unique key .
33,586
static FSImageCompression createCompression ( Configuration conf , boolean forceUncompressed ) throws IOException { boolean compressImage = ( ! forceUncompressed ) && conf . getBoolean ( HdfsConstants . DFS_IMAGE_COMPRESS_KEY , HdfsConstants . DFS_IMAGE_COMPRESS_DEFAULT ) ; if ( ! compressImage ) { return createNoopCom...
Create a compression instance based on the user's configuration in the given Configuration object .
33,587
public static FSImageCompression readCompressionHeader ( Configuration conf , DataInputStream dis ) throws IOException { boolean isCompressed = dis . readBoolean ( ) ; if ( ! isCompressed ) { return createNoopCompression ( ) ; } else { String codecClassName = Text . readString ( dis ) ; return createCompression ( conf ...
Create a compression instance based on a header read from an input stream .
33,588
/**
 * Wraps the given stream with a decompressor for this instance's codec, or
 * returns it unchanged when this instance represents no compression.
 *
 * @param is the (possibly compressed) input stream
 * @return a decompressing stream over {@code is}, or {@code is} itself
 * @throws IOException if the codec fails to create the decompressing stream
 */
public InputStream unwrapInputStream(InputStream is) throws IOException {
  return imageCodec == null ? is : imageCodec.createInputStream(is);
}
Unwrap a compressed input stream by wrapping it with a decompressor based on this codec . If this instance represents no compression simply return the input stream .
33,589
DataOutputStream writeHeaderAndWrapStream ( OutputStream os ) throws IOException { DataOutputStream dos = new DataOutputStream ( os ) ; dos . writeBoolean ( imageCodec != null ) ; if ( imageCodec != null ) { String codecClassName = imageCodec . getClass ( ) . getCanonicalName ( ) ; Text . writeString ( dos , codecClass...
Write out a header to the given stream that indicates the chosen compression codec and return the same stream wrapped with that codec . If no codec is specified simply adds buffering to the stream so that the returned stream is always buffered .
33,590
/**
 * Downgrades a new-API {@link org.apache.hadoop.mapreduce.TaskID} to an
 * old-API {@code TaskID}, returning the argument itself when it already is
 * one and otherwise rebuilding it field by field.
 *
 * @param old the new-API task id
 * @return an equivalent old-API task id
 */
public static TaskID downgrade(org.apache.hadoop.mapreduce.TaskID old) {
  if (old instanceof TaskID) {
    return (TaskID) old;
  }
  return new TaskID(JobID.downgrade(old.getJobID()), old.isMap(), old.getId());
}
Downgrade a new TaskID to an old one
33,591
/**
 * Quiesces any ongoing ingest and, if its consumed segment was not fully
 * processed, reprocesses that segment. A no-op when no ingest is active.
 *
 * @throws IOException if quiescing or reprocessing fails
 */
private void quiesceIngestWithReprocess() throws IOException {
  if (ingest == null) {
    return;
  }
  LOG.info("Standby: Quiescing - quiescing ongoing ingest");
  quiesceIngest();
  reprocessCurrentSegmentIfNeeded(ingest.getIngestStatus());
}
When ingest consumes the end-of-segment transaction it sets the state to not ingesting . This function ensures that the ingest thread has exited .
33,592
private void quiesceIngest ( ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . STANDBY_QUIESCE_INGEST ) ; synchronized ( ingestStateLock ) { assertState ( StandbyIngestState . INGESTING_EDITS , StandbyIngestState . NOT_INGESTING ) ; ingest . quiesce ( ) ; } try { ingestThread . join ( ) ; curren...
Quiesces the currently running ingest
33,593
private void instantiateIngest ( ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . STANDBY_INSTANTIATE_INGEST ) ; try { synchronized ( ingestStateLock ) { if ( checkIngestState ( ) ) { LOG . info ( "Standby: Ingest for txid: " + currentSegmentTxId + " is already running" ) ; return ; } assertSta...
Instantiates ingest thread for the current edits segment .
33,594
private void reprocessCurrentSegmentIfNeeded ( boolean status ) throws IOException { if ( status ) { return ; } assertState ( StandbyIngestState . NOT_INGESTING ) ; LOG . info ( "Standby: Quiesce - reprocessing edits segment starting at: " + currentSegmentTxId ) ; instantiateIngest ( ) ; quiesceIngest ( ) ; if ( ! inge...
Processes the previously consumed edits segment if needed
33,595
void triggerCheckpoint ( boolean uncompressed ) throws IOException { String pref = "Standby: Checkpoint - " ; LOG . info ( pref + "triggering checkpoint manually" ) ; if ( uncompressed ) { throwIOException ( pref + " uncompressed option not supported" , null ) ; } if ( manualCheckpointLatch . getCount ( ) > 0 ) { throw...
Trigger checkpoint . If there is an ongoing scheduled checkpoint this call will trigger a checkpoint immediately after . The method blocks until the checkpoint is done .
33,596
private void handleCheckpointFailure ( ) { setCheckpointFailures ( checkpointFailures + 1 ) ; if ( checkpointFailures > MAX_CHECKPOINT_FAILURES ) { LOG . fatal ( "Standby: Checkpointing - standby failed to checkpoint in " + checkpointFailures + " attempts. Aborting" ) ; } else { LOG . info ( "Sleeping " + CHECKPOINT_SL...
If checkpoint fails continuously we want to abort the standby . We want to avoid the situation in which the standby continuously rolls edit log on the primary without finalizing checkpoint .
33,597
private void uploadImage ( long txid ) throws IOException { final long start = AvatarNode . now ( ) ; LOG . info ( "Standby: Checkpointing - Upload fsimage to remote namenode." ) ; checkpointStatus ( "Image upload started" ) ; imageUploader = new ImageUploader ( txid ) ; imageUploader . start ( ) ; while ( running && !...
Creates image upload thread .
33,598
/**
 * Uploads the checkpointed fsimage for the given transaction id from this
 * node's storage to the remote namenode identified by {@code fsName}.
 *
 * @param txid transaction id of the image to upload
 * @throws IOException if the transfer fails
 */
private void putFSImage ( long txid ) throws IOException { TransferFsImage . uploadImageFromStorage ( fsName , machineName , infoPort , fsImage . storage , txid ) ; }
Copy the new fsimage into the NameNode
33,599
private void checkImageValidation ( ) throws IOException { try { imageValidator . join ( ) ; } catch ( InterruptedException ie ) { throw ( IOException ) new InterruptedIOException ( ) . initCause ( ie ) ; } if ( ! imageValidator . succeeded ) { throw new IOException ( "Image file validation failed" , imageValidator . e...
Checks the status of image validation during checkpoint .