idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
33,500 | protected void setFields ( Properties props , StorageDirectory sd ) throws IOException { super . setFields ( props , sd ) ; boolean uState = getDistributedUpgradeState ( ) ; int uVersion = getDistributedUpgradeVersion ( ) ; if ( uState && uVersion != getLayoutVersion ( ) ) { props . setProperty ( "distributedUpgradeState" , Boolean . toString ( uState ) ) ; props . setProperty ( "distributedUpgradeVersion" , Integer . toString ( uVersion ) ) ; } } | Write version file into the storage directory . |
33,501 | private void setDeprecatedPropertiesForUpgrade ( Properties props ) { deprecatedProperties = new HashMap < String , String > ( ) ; String md5 = props . getProperty ( MESSAGE_DIGEST_PROPERTY ) ; if ( md5 != null ) { deprecatedProperties . put ( MESSAGE_DIGEST_PROPERTY , md5 ) ; } } | Pull any properties out of the VERSION file that are from older versions of HDFS and only necessary during upgrade . |
33,502 | File findFinalizedEditsFile ( long startTxId , long endTxId ) throws IOException { File ret = findFile ( NameNodeDirType . EDITS , getFinalizedEditsFileName ( startTxId , endTxId ) ) ; if ( ret == null ) { throw new IOException ( "No edits file for txid " + startTxId + "-" + endTxId + " exists!" ) ; } return ret ; } | Return the first readable finalized edits file for the given txid . |
33,503 | File findInProgressEditsFile ( long startTxId ) throws IOException { File ret = findFile ( NameNodeDirType . EDITS , getInProgressEditsFileName ( startTxId ) ) ; if ( ret == null ) { throw new IOException ( "No edits file for txid " + startTxId + "-in progress" ) ; } return ret ; } | Return the first readable inprogress edits file for the given txid . |
33,504 | private File findFile ( NameNodeDirType dirType , String name ) { for ( StorageDirectory sd : dirIterable ( dirType ) ) { File candidate = new File ( sd . getCurrentDir ( ) , name ) ; if ( sd . getCurrentDir ( ) . canRead ( ) && candidate . exists ( ) ) { return candidate ; } } return null ; } | Return the first readable storage file of the given name across any of the current directories in SDs of the given type or null if no such file exists . |
33,505 | static boolean isPreferred ( StorageLocationType type , StorageDirectory sd ) { if ( ( sd instanceof NNStorageDirectory ) ) { return ( ( NNStorageDirectory ) sd ) . type == type ; } return true ; } | Checks if we have information about this directory that it is preferred . |
33,506 | static StorageLocationType getType ( StorageDirectory sd ) { if ( ( sd instanceof NNStorageDirectory ) ) { return ( ( NNStorageDirectory ) sd ) . type ; } return StorageLocationType . LOCAL ; } | Get the type of given directory . |
33,507 | void verifyDistributedUpgradeProgress ( StartupOption startOpt ) throws IOException { if ( startOpt == StartupOption . ROLLBACK || startOpt == StartupOption . IMPORT ) return ; assert upgradeManager != null : "FSNameSystem.upgradeManager is null." ; if ( startOpt != StartupOption . UPGRADE ) { if ( upgradeManager . getUpgradeState ( ) ) throw new IOException ( "\n Previous distributed upgrade was not completed. " + "\n Please restart NameNode with -upgrade option." ) ; if ( upgradeManager . getDistributedUpgrades ( ) != null ) throw new IOException ( "\n Distributed upgrade for NameNode version " + upgradeManager . getUpgradeVersion ( ) + " to current LV " + layoutVersion + " is required.\n Please restart NameNode" + " with -upgrade option." ) ; } } | Verify that the distributed upgrade state is valid . |
33,508 | void initializeDistributedUpgrade ( ) throws IOException { if ( ! upgradeManager . initializeUpgrade ( ) ) return ; writeAll ( ) ; LOG . info ( "\n Distributed upgrade for NameNode version " + upgradeManager . getUpgradeVersion ( ) + " to current LV " + layoutVersion + " is initialized." ) ; } | Initialize a distributed upgrade . |
33,509 | synchronized void reportErrorsOnDirectories ( List < StorageDirectory > sds , FSImage image ) throws IOException { for ( StorageDirectory sd : sds ) { reportErrorsOnDirectory ( sd , image ) ; } if ( image != null ) { image . checkImageManagers ( ) ; } if ( ! sds . isEmpty ( ) ) { if ( this . getNumStorageDirs ( ) == 0 ) throw new IOException ( "No more storage directories left" ) ; if ( getNumStorageDirs ( NameNodeDirType . IMAGE ) == 0 ) throw new IOException ( "No more image storage directories left" ) ; } } | Marks a list of directories as having experienced an error . |
33,510 | synchronized void reportErrorsOnDirectory ( StorageDirectory sd , FSImage image ) { String lsd = listStorageDirectories ( ) ; LOG . info ( "reportErrorsOnDirectory: Current list of storage dirs:" + lsd ) ; LOG . error ( "reportErrorsOnDirectory: Error reported on storage directory " + sd . getRoot ( ) ) ; if ( this . storageDirs . remove ( sd ) ) { try { sd . unlock ( ) ; } catch ( Exception e ) { LOG . warn ( "reportErrorsOnDirectory: Unable to unlock bad storage directory: " + sd . getRoot ( ) . getPath ( ) , e ) ; } this . removedStorageDirs . add ( sd ) ; } if ( image != null ) { image . reportErrorsOnImageManager ( sd ) ; } lsd = listStorageDirectories ( ) ; LOG . info ( "reportErrorsOnDirectory: Current list of storage dirs:" + lsd ) ; } | Reports that a directory has experienced an error . Notifies listeners that the directory is no longer available . |
33,511 | public void reportErrorOnFile ( File f ) { String absPath = f . getAbsolutePath ( ) ; for ( StorageDirectory sd : storageDirs ) { String dirPath = sd . getRoot ( ) . getAbsolutePath ( ) ; if ( ! dirPath . endsWith ( "/" ) ) { dirPath += "/" ; } if ( absPath . startsWith ( dirPath ) ) { reportErrorsOnDirectory ( sd , null ) ; return ; } } } | Report that an IOE has occurred on some file which may or may not be within one of the NN image storage directories . |
33,512 | void inspectStorageDirs ( FSImageStorageInspector inspector ) throws IOException { for ( Iterator < StorageDirectory > it = dirIterator ( ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; inspector . inspectDirectory ( sd ) ; } } | Iterate over all current storage directories inspecting them with the given inspector . |
33,513 | FSImageStorageInspector readAndInspectDirs ( ) throws IOException { int minLayoutVersion = Integer . MAX_VALUE ; int maxLayoutVersion = Integer . MIN_VALUE ; for ( Iterator < StorageDirectory > it = dirIterator ( ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; if ( ! sd . getVersionFile ( ) . exists ( ) ) { FSImage . LOG . warn ( "Storage directory " + sd + " contains no VERSION file. Skipping..." ) ; continue ; } sd . read ( ) ; minLayoutVersion = Math . min ( minLayoutVersion , getLayoutVersion ( ) ) ; maxLayoutVersion = Math . max ( maxLayoutVersion , getLayoutVersion ( ) ) ; } if ( minLayoutVersion > maxLayoutVersion ) { throw new IOException ( "No storage directories contained VERSION information" ) ; } assert minLayoutVersion <= maxLayoutVersion ; FSImageStorageInspector inspector ; if ( LayoutVersion . supports ( Feature . TXID_BASED_LAYOUT , minLayoutVersion ) ) { inspector = new FSImageTransactionalStorageInspector ( ) ; if ( ! LayoutVersion . supports ( Feature . TXID_BASED_LAYOUT , maxLayoutVersion ) ) { FSImage . LOG . warn ( "Ignoring one or more storage directories with old layouts" ) ; } } else { inspector = new FSImagePreTransactionalStorageInspector ( conf ) ; } inspectStorageDirs ( inspector ) ; return inspector ; } | Iterate over all of the storage dirs reading their contents to determine their layout versions . Returns an FSImageStorageInspector which has inspected each directory . |
33,514 | public static boolean isAvailable ( ) { try { String osName = System . getProperty ( "os.name" ) ; if ( ! osName . startsWith ( "Linux" ) ) { LOG . info ( "ProcfsBasedProcessTree currently is supported only on " + "Linux." ) ; return false ; } } catch ( SecurityException se ) { LOG . warn ( "Failed to get Operating System name. " + se ) ; return false ; } return true ; } | Checks if the ProcfsBasedProcessTree is available on this system . |
33,515 | public ProcfsBasedProcessTree getProcessTree ( ) { if ( pid != - 1 ) { List < Integer > processList = getProcessList ( ) ; Map < Integer , ProcessInfo > allProcessInfo = new HashMap < Integer , ProcessInfo > ( ) ; Map < Integer , ProcessInfo > oldProcs = new HashMap < Integer , ProcessInfo > ( processTree ) ; processTree . clear ( ) ; ProcessInfo me = null ; for ( Integer proc : processList ) { ProcessInfo pInfo = new ProcessInfo ( proc ) ; if ( constructProcessInfo ( pInfo , procfsDir ) != null ) { allProcessInfo . put ( proc , pInfo ) ; if ( proc . equals ( this . pid ) ) { me = pInfo ; processTree . put ( proc , pInfo ) ; } } } if ( me == null ) { return this ; } for ( Map . Entry < Integer , ProcessInfo > entry : allProcessInfo . entrySet ( ) ) { Integer pID = entry . getKey ( ) ; if ( pID != 1 ) { ProcessInfo pInfo = entry . getValue ( ) ; ProcessInfo parentPInfo = allProcessInfo . get ( pInfo . getPpid ( ) ) ; if ( parentPInfo != null ) { parentPInfo . addChild ( pInfo ) ; } } } LinkedList < ProcessInfo > pInfoQueue = new LinkedList < ProcessInfo > ( ) ; pInfoQueue . addAll ( me . getChildren ( ) ) ; while ( ! pInfoQueue . isEmpty ( ) ) { ProcessInfo pInfo = pInfoQueue . remove ( ) ; if ( ! processTree . containsKey ( pInfo . getPid ( ) ) ) { processTree . put ( pInfo . getPid ( ) , pInfo ) ; } pInfoQueue . addAll ( pInfo . getChildren ( ) ) ; } for ( Map . Entry < Integer , ProcessInfo > procs : processTree . entrySet ( ) ) { ProcessInfo oldInfo = oldProcs . get ( procs . getKey ( ) ) ; if ( procs . getValue ( ) != null ) { procs . getValue ( ) . updateJiffy ( oldInfo ) ; if ( oldInfo != null ) { procs . getValue ( ) . updateAge ( oldInfo ) ; } } } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( this . toString ( ) ) ; } } return this ; } | Get the process - tree with latest state . If the root - process is not alive an empty tree will be returned . |
33,516 | public boolean isAnyProcessInTreeAlive ( ) { for ( Integer pId : processTree . keySet ( ) ) { if ( isAlive ( pId . toString ( ) ) ) { return true ; } } return false ; } | Is any of the subprocesses in the process - tree alive? |
33,517 | static boolean checkPidPgrpidForMatch ( String pidStr , String procfsDir ) { Integer pId = Integer . parseInt ( pidStr ) ; ProcessInfo pInfo = new ProcessInfo ( pId ) ; pInfo = constructProcessInfo ( pInfo , procfsDir ) ; if ( pInfo == null ) { return true ; } if ( ! pInfo . getPgrpId ( ) . equals ( pId ) ) { LOG . warn ( "Unexpected: Process with PID " + pId + " is not a process group leader." ) ; return false ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( pId + " is a process group leader, as expected." ) ; } return true ; } | Verify that the given process id is same as its process group id . |
33,518 | public static void assertAndDestroyProcessGroup ( String pgrpId , long interval , boolean inBackground ) throws IOException { if ( ! checkPidPgrpidForMatch ( pgrpId , PROCFS ) ) { throw new IOException ( "Process with PID " + pgrpId + " is not a process group leader." ) ; } destroyProcessGroup ( pgrpId , interval , inBackground ) ; } | Make sure that the given pid is a process group leader and then destroy the process group . |
33,519 | public String getProcessTreeDump ( ) { StringBuilder ret = new StringBuilder ( ) ; ret . append ( String . format ( "\t|- PID PPID PGRPID SESSID CMD_NAME " + "USER_MODE_TIME(MILLIS) SYSTEM_TIME(MILLIS) VMEM_USAGE(BYTES) " + "RSSMEM_USAGE(PAGES) FULL_CMD_LINE\n" ) ) ; for ( ProcessInfo p : processTree . values ( ) ) { if ( p != null ) { ret . append ( String . format ( PROCESSTREE_DUMP_FORMAT , p . getPid ( ) , p . getPpid ( ) , p . getPgrpId ( ) , p . getSessionId ( ) , p . getName ( ) , p . getUtime ( ) , p . getStime ( ) , p . getVmem ( ) , p . getRssmemPage ( ) , p . getCmdLine ( procfsDir ) ) ) ; } } return ret . toString ( ) ; } | Get a dump of the process - tree . |
33,520 | public Collection < String > getProcessNameContainsCount ( String name ) { List < String > retProcessList = new ArrayList < String > ( ) ; List < Integer > processList = getProcessList ( ) ; for ( Integer proc : processList ) { ProcessInfo p = new ProcessInfo ( proc ) ; if ( constructProcessInfo ( p , procfsDir ) != null ) { if ( p . getCmdLine ( procfsDir ) . contains ( name ) ) { StringBuilder processSb = new StringBuilder ( ) ; processSb . append ( String . format ( PROCESSTREE_DUMP_FORMAT , p . getPid ( ) , p . getPpid ( ) , p . getPgrpId ( ) , p . getSessionId ( ) , p . getName ( ) , p . getUtime ( ) , p . getStime ( ) , p . getVmem ( ) , p . getRssmemPage ( ) , p . getCmdLine ( procfsDir ) ) ) ; retProcessList . add ( processSb . toString ( ) ) ; } } } return retProcessList ; } | Get a count of the number of processes that have a commandline that matches a name . |
33,521 | public long getCumulativeVmem ( int olderThanAge ) { long total = 0 ; for ( ProcessInfo p : processTree . values ( ) ) { if ( ( p != null ) && ( p . getAge ( ) > olderThanAge ) ) { total += p . getVmem ( ) ; } } return total ; } | Get the cumulative virtual memory used by all the processes in the process - tree that are older than the passed in age . |
33,522 | public long getCumulativeCpuTime ( ) { if ( JIFFY_LENGTH_IN_MILLIS < 0 ) { return 0 ; } long incJiffies = 0 ; for ( ProcessInfo p : processTree . values ( ) ) { if ( p != null ) { incJiffies += p . dtime ; } } if ( incJiffies * JIFFY_LENGTH_IN_MILLIS < Integer . MAX_VALUE ) { cpuTime += incJiffies * JIFFY_LENGTH_IN_MILLIS ; } return cpuTime ; } | Get the CPU time in millisecond used by all the processes in the process - tree since the process - tree created |
33,523 | private List < Integer > getProcessList ( ) { String [ ] processDirs = ( new File ( procfsDir ) ) . list ( ) ; List < Integer > processList = new ArrayList < Integer > ( ) ; for ( String dir : processDirs ) { try { int pd = Integer . parseInt ( dir ) ; if ( ( new File ( procfsDir , dir ) ) . isDirectory ( ) ) { processList . add ( Integer . valueOf ( pd ) ) ; } } catch ( NumberFormatException n ) { } catch ( SecurityException s ) { } } return processList ; } | Get the list of all processes in the system . |
33,524 | private static ProcessInfo constructProcessInfo ( ProcessInfo pinfo , String procfsDir ) { ProcessInfo ret = null ; BufferedReader in = null ; FileReader fReader = null ; try { File pidDir = new File ( procfsDir , String . valueOf ( pinfo . getPid ( ) ) ) ; fReader = new FileReader ( new File ( pidDir , PROCFS_STAT_FILE ) ) ; in = new BufferedReader ( fReader ) ; } catch ( FileNotFoundException f ) { LOG . debug ( "The process " + pinfo . getPid ( ) + " may have finished in the interim." ) ; return ret ; } ret = pinfo ; try { String str = in . readLine ( ) ; Matcher m = PROCFS_STAT_FILE_FORMAT . matcher ( str ) ; boolean mat = m . find ( ) ; if ( mat ) { pinfo . updateProcessInfo ( m . group ( 2 ) , Integer . parseInt ( m . group ( 3 ) ) , Integer . parseInt ( m . group ( 4 ) ) , Integer . parseInt ( m . group ( 5 ) ) , Long . parseLong ( m . group ( 7 ) ) , Long . parseLong ( m . group ( 8 ) ) , Long . parseLong ( m . group ( 10 ) ) , Long . parseLong ( m . group ( 11 ) ) ) ; } else { LOG . warn ( "Unexpected: procfs stat file is not in the expected format" + " for process with pid " + pinfo . getPid ( ) ) ; ret = null ; } } catch ( IOException io ) { LOG . warn ( "Error reading the stream " + io ) ; ret = null ; } finally { try { fReader . close ( ) ; try { in . close ( ) ; } catch ( IOException i ) { LOG . warn ( "Error closing the stream " + in ) ; } } catch ( IOException i ) { LOG . warn ( "Error closing the stream " + fReader ) ; } } return ret ; } | Construct the ProcessInfo using the process PID and procfs rooted at the specified directory and return the same . It is provided mainly to assist testing purposes . |
33,525 | private static void parseOptions ( String args [ ] ) { if ( ! ( args . length == 14 || args . length == 12 || args . length == 5 ) ) { usage ( ) ; } for ( int i = 0 ; i < args . length ; i ++ ) { if ( args [ i ] . equals ( "-s" ) && i + 1 < args . length ) { dfsServer_ = args [ i + 1 ] ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } else if ( args [ i ] . equals ( "-p" ) && i + 1 < args . length ) { dfsPort_ = Integer . parseInt ( args [ i + 1 ] ) ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } else if ( args [ i ] . equals ( "-t" ) && i + 1 < args . length ) { testName_ = args [ i + 1 ] ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } else if ( args [ i ] . equals ( "-a" ) && i + 1 < args . length ) { planfilePath_ = args [ i + 1 ] ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } else if ( args [ i ] . equals ( "-c" ) && i + 1 < args . length ) { hostName_ = args [ i + 1 ] ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } else if ( args [ i ] . equals ( "-n" ) && i + 1 < args . length ) { processName_ = args [ i + 1 ] ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } else if ( args [ i ] . equals ( "-P" ) && i + 1 < args . length ) { prefix_ = args [ i + 1 ] ; System . out . println ( args [ i + 1 ] ) ; i ++ ; } } if ( dfsServer_ . length ( ) == 0 || testName_ . length ( ) == 0 || planfilePath_ . length ( ) == 0 || hostName_ . length ( ) == 0 || processName_ . length ( ) == 0 || dfsPort_ == 0 ) { usage ( ) ; } if ( prefix_ == null ) { prefix_ = new String ( "PATH_PREFIX_" ) ; } prefixLen_ = prefix_ . length ( ) ; } | Parses command line options . the hostName processName are used to namespace the files created by this instance of the benchmark . |
33,526 | private static void usage ( ) { String className = MStress_Client . class . getName ( ) ; System . out . printf ( "Usage: java %s -s dfs-server -p dfs-port" + "[-t [create|stat|read|readdir|delete|rename] -a planfile-path -c host -n process-name" + " -P prefix]\n" , className ) ; System . out . printf ( " -t: this option requires -a, -c, and -n options.\n" ) ; System . out . printf ( " -P: default prefix is PATH_.\n" ) ; System . out . printf ( "eg:\n" ) ; System . out . printf ( " java %s -s <metaserver-host> -p <metaserver-port> -t create" + " -a <planfile> -c localhost -n Proc_00\n" , className ) ; System . exit ( 1 ) ; } | Prints usage information to standard out . |
33,527 | private static int parsePlanFile ( ) { int ret = - 1 ; try { FileInputStream fis = new FileInputStream ( planfilePath_ ) ; DataInputStream dis = new DataInputStream ( fis ) ; BufferedReader br = new BufferedReader ( new InputStreamReader ( dis ) ) ; if ( prefix_ . isEmpty ( ) ) { prefix_ = "PATH_PREFIX_" ; } String line ; while ( ( line = br . readLine ( ) ) != null ) { if ( line . length ( ) == 0 || line . startsWith ( "#" ) ) { continue ; } if ( line . startsWith ( "type=" ) ) { type_ = line . substring ( 5 ) ; continue ; } if ( line . startsWith ( "levels=" ) ) { levels_ = Integer . parseInt ( line . substring ( 7 ) ) ; continue ; } if ( line . startsWith ( "inodes=" ) ) { inodesPerLevel_ = Integer . parseInt ( line . substring ( 7 ) ) ; continue ; } if ( line . startsWith ( "nstat=" ) ) { pathsToStat_ = Integer . parseInt ( line . substring ( 6 ) ) ; continue ; } } dis . close ( ) ; if ( levels_ > 0 && ! type_ . isEmpty ( ) && inodesPerLevel_ > 0 && pathsToStat_ > 0 ) { ret = 0 ; } } catch ( Exception e ) { System . out . println ( "Error: " + e . getMessage ( ) ) ; } return ret ; } | Parses the plan file that contains parameters for the benchmark . |
33,528 | private static int CreateDFSPaths ( int level , String parentPath ) { Boolean isLeaf = false ; Boolean isDir = false ; if ( level + 1 >= levels_ ) { isLeaf = true ; } if ( isLeaf ) { if ( type_ . equals ( "dir" ) ) { isDir = true ; } else { isDir = false ; } } else { isDir = true ; } for ( int i = 0 ; i < inodesPerLevel_ ; i ++ ) { String path = parentPath + "/" + prefix_ + Integer . toString ( i ) ; if ( isDir ) { try { long startTime = System . nanoTime ( ) ; if ( dfsClient_ . mkdirs ( path ) == false ) { System . out . printf ( "Error in mkdirs(%s)\n" , path ) ; return - 1 ; } timingMkdirs_ . add ( new Double ( ( System . nanoTime ( ) - startTime ) / ( 1E9 ) ) ) ; System . out . printf ( "Creating dir %s\n" , path ) ; totalCreateCount ++ ; if ( totalCreateCount % COUNT_INCR == 0 ) { System . out . printf ( "Created paths so far: %d\n" , totalCreateCount ) ; } if ( ! isLeaf ) { if ( CreateDFSPaths ( level + 1 , path ) < 0 ) { System . out . printf ( "Error in CreateDFSPaths(%s)\n" , path ) ; return - 1 ; } } } catch ( IOException e ) { e . printStackTrace ( ) ; return - 1 ; } } else { try { System . out . printf ( "Creating file %s\n" , path ) ; long startTime = System . nanoTime ( ) ; OutputStream os = dfsClient_ . create ( path , true ) ; timingCreate_ . add ( new Double ( ( System . nanoTime ( ) - startTime ) / ( 1E9 ) ) ) ; files_ . put ( path , os ) ; totalCreateCount ++ ; if ( totalCreateCount % COUNT_INCR == 0 ) { System . out . printf ( "Created paths so far: %d\n" , totalCreateCount ) ; } } catch ( IOException e ) { e . printStackTrace ( ) ; return - 1 ; } } } return 0 ; } | Recursively creates directories and files . |
33,529 | private static int createWriteDFSPaths ( ) { if ( createDFSPaths ( ) != 0 ) { return - 1 ; } try { for ( Map . Entry < String , OutputStream > file : files_ . entrySet ( ) ) { OutputStream os = file . getValue ( ) ; long startTime = System . nanoTime ( ) ; os . write ( data_ . getBytes ( ) ) ; timingWrite_ . add ( new Double ( ( System . nanoTime ( ) - startTime ) / ( 1E9 ) ) ) ; os . close ( ) ; } } catch ( IOException e ) { e . printStackTrace ( ) ; return - 1 ; } return 0 ; } | This creates DFS paths and writes data_ to them in one go . |
33,530 | @ SuppressWarnings ( "deprecation" ) private void localizeTaskConfiguration ( TaskTracker tracker , JobConf ttConf , String workDir , Task t , JobID jobID ) throws IOException { Path jobFile = new Path ( t . getJobFile ( ) ) ; FileSystem systemFS = tracker . systemFS ; this . localizedJobFile = new Path ( workDir , jobID + ".xml" ) ; LOG . info ( "Localizing CJT configuration from " + jobFile + " to " + localizedJobFile ) ; systemFS . copyToLocalFile ( jobFile , localizedJobFile ) ; JobConf localJobConf = new JobConf ( localizedJobFile ) ; boolean modified = Task . saveStaticResolutions ( localJobConf ) ; if ( modified ) { FSDataOutputStream out = new FSDataOutputStream ( new FileOutputStream ( localizedJobFile . toUri ( ) . getPath ( ) ) ) ; try { localJobConf . writeXml ( out ) ; } catch ( IOException e ) { out . close ( ) ; throw e ; } } this . conf . addResource ( localizedJobFile ) ; } | Copies the job file to the working directory of the process that will be started . |
33,531 | public boolean prepare ( ) throws IOException { if ( ! super . prepare ( ) ) { return false ; } mapOutputFile . removeAll ( getTask ( ) . getTaskID ( ) ) ; return true ; } | Delete any temporary files from previous failed attempts . |
33,532 | protected void addDefaults ( ) { try { Parser . CNode . addIdentifier ( "inner" , InnerJoinRecordReader . class ) ; Parser . CNode . addIdentifier ( "outer" , OuterJoinRecordReader . class ) ; Parser . CNode . addIdentifier ( "override" , OverrideRecordReader . class ) ; Parser . WNode . addIdentifier ( "tbl" , WrappedRecordReader . class ) ; } catch ( NoSuchMethodException e ) { throw new RuntimeException ( "FATAL: Failed to init defaults" , e ) ; } } | Adds the default set of identifiers to the parser . |
33,533 | private void addUserIdentifiers ( JobConf job ) throws IOException { Pattern x = Pattern . compile ( "^mapred\\.join\\.define\\.(\\w+)$" ) ; for ( Map . Entry < String , String > kv : job ) { Matcher m = x . matcher ( kv . getKey ( ) ) ; if ( m . matches ( ) ) { try { Parser . CNode . addIdentifier ( m . group ( 1 ) , job . getClass ( m . group ( 0 ) , null , ComposableRecordReader . class ) ) ; } catch ( NoSuchMethodException e ) { throw ( IOException ) new IOException ( "Invalid define for " + m . group ( 1 ) ) . initCause ( e ) ; } } } } | Inform the parser of user - defined types . |
33,534 | public InputSplit [ ] getSplits ( JobConf job , int numSplits ) throws IOException { setFormat ( job ) ; job . setLong ( "mapred.min.split.size" , Long . MAX_VALUE ) ; return root . getSplits ( job , numSplits ) ; } | Build a CompositeInputSplit from the child InputFormats by assigning the ith split from each child to the ith composite split . |
33,535 | @ SuppressWarnings ( "unchecked" ) public ComposableRecordReader < K , TupleWritable > getRecordReader ( InputSplit split , JobConf job , Reporter reporter ) throws IOException { setFormat ( job ) ; return root . getRecordReader ( split , job , reporter ) ; } | Construct a CompositeRecordReader for the children of this InputFormat as defined in the init expression . The outermost join need only be composable not necessarily a composite . Mandating TupleWritable isn t strictly correct . |
33,536 | public static void main ( String [ ] args ) { if ( args . length == 3 && "-getlevel" . equals ( args [ 0 ] ) ) { process ( "http://" + args [ 1 ] + "/logLevel?log=" + args [ 2 ] ) ; return ; } else if ( args . length == 4 && "-setlevel" . equals ( args [ 0 ] ) ) { process ( "http://" + args [ 1 ] + "/logLevel?log=" + args [ 2 ] + "&level=" + args [ 3 ] ) ; return ; } System . err . println ( USAGES ) ; System . exit ( - 1 ) ; } | A command line implementation |
33,537 | public FileStatus getFileStatus ( Path f ) throws IOException { INode inode = store . retrieveINode ( makeAbsolute ( f ) ) ; if ( inode == null ) { throw new FileNotFoundException ( f + ": No such file or directory." ) ; } return new S3FileStatus ( f . makeQualified ( this ) , inode ) ; } | FileStatus for S3 file systems . |
33,538 | public void assignClient ( long clientId ) { LOG . info ( "Assigning client " + clientId + " ..." ) ; synchronized ( clientModificationsLock ) { newlyAssignedClients . add ( clientId ) ; } } | Assigns a client to this dispatcher . If a notification fails to be sent to a client then it will be placed in a queue and the assigned dispatcher for each client will try to re - send notifications from that queue . |
33,539 | public void handleFailedDispatch ( long clientId , long failedTime ) { ClientData clientData = core . getClientData ( clientId ) ; if ( failedTime == - 1 || clientData == null ) return ; if ( clientData . markedAsFailedTime == - 1 ) { clientData . markedAsFailedTime = failedTime ; LOG . info ( "Marked client " + clientId + " as failed at " + failedTime ) ; } clientData . lastSentTime = failedTime ; } | Called each time a handleNotification or heartbeat Thrift call fails . |
33,540 | public void handleSuccessfulDispatch ( long clientId , long sentTime ) { ClientData clientData = core . getClientData ( clientId ) ; if ( sentTime == - 1 || clientData == null ) return ; clientData . markedAsFailedTime = - 1 ; if ( clientData . markedAsFailedTime != - 1 ) { LOG . info ( "Unmarking " + clientId + " at " + sentTime ) ; } clientData . lastSentTime = sentTime ; } | Called each time a handleNotification or heartbeat Thrift call is successful . |
33,541 | private void updateClients ( ) { assignedClients . addAll ( newlyAssignedClients ) ; assignedClients . removeAll ( removedClients ) ; newlyAssignedClients . clear ( ) ; removedClients . clear ( ) ; } | Must be called with the clientsModificationLock hold . |
33,542 | private void populateAccess ( ) throws SQLException { PreparedStatement statement = null ; try { statement = connection . prepareStatement ( "INSERT INTO Access(url, referrer, time)" + " VALUES (?, ?, ?)" ) ; Random random = new Random ( ) ; int time = random . nextInt ( 50 ) + 50 ; final int PROBABILITY_PRECISION = 100 ; final int NEW_PAGE_PROBABILITY = 15 ; String [ ] pages = { "/a" , "/b" , "/c" , "/d" , "/e" , "/f" , "/g" , "/h" , "/i" , "/j" } ; int [ ] [ ] linkMatrix = { { 1 , 5 , 7 } , { 0 , 7 , 4 , 6 , } , { 0 , 1 , 7 , 8 } , { 0 , 2 , 4 , 6 , 7 , 9 } , { 0 , 1 } , { 0 , 3 , 5 , 9 } , { 0 } , { 0 , 1 , 3 } , { 0 , 2 , 6 } , { 0 , 2 , 6 } } ; int currentPage = random . nextInt ( pages . length ) ; String referrer = null ; for ( int i = 0 ; i < time ; i ++ ) { statement . setString ( 1 , pages [ currentPage ] ) ; statement . setString ( 2 , referrer ) ; statement . setLong ( 3 , i ) ; statement . execute ( ) ; int action = random . nextInt ( PROBABILITY_PRECISION ) ; if ( action < NEW_PAGE_PROBABILITY ) { currentPage = random . nextInt ( pages . length ) ; referrer = null ; } else { referrer = pages [ currentPage ] ; action = random . nextInt ( linkMatrix [ currentPage ] . length ) ; currentPage = linkMatrix [ currentPage ] [ action ] ; } } connection . commit ( ) ; } catch ( SQLException ex ) { connection . rollback ( ) ; throw ex ; } finally { if ( statement != null ) { statement . close ( ) ; } } } | Populates the Access table with generated records . |
33,543 | private boolean verify ( ) throws SQLException { String countAccessQuery = "SELECT COUNT(*) FROM Access" ; String sumPageviewQuery = "SELECT SUM(pageview) FROM Pageview" ; Statement st = null ; ResultSet rs = null ; try { st = connection . createStatement ( ) ; rs = st . executeQuery ( countAccessQuery ) ; rs . next ( ) ; long totalPageview = rs . getLong ( 1 ) ; rs = st . executeQuery ( sumPageviewQuery ) ; rs . next ( ) ; long sumPageview = rs . getLong ( 1 ) ; LOG . info ( "totalPageview=" + totalPageview ) ; LOG . info ( "sumPageview=" + sumPageview ) ; return totalPageview == sumPageview && totalPageview != 0 ; } finally { if ( st != null ) st . close ( ) ; if ( rs != null ) rs . close ( ) ; } } | Verifies the results are correct |
33,544 | DatanodeBlockInfo get ( int namespaceId , Block block ) { checkBlock ( block ) ; NamespaceMap nm = getNamespaceMap ( namespaceId ) ; if ( nm == null ) { return null ; } return nm . getBlockInfo ( block ) ; } | Get the meta information of the replica that matches both block id and generation stamp |
33,545 | DatanodeBlockInfo remove ( int namespaceId , Block block ) { NamespaceMap nm = getNamespaceMap ( namespaceId ) ; if ( nm == null ) { return null ; } if ( datasetDelta != null ) { datasetDelta . removeBlock ( namespaceId , block ) ; } return nm . removeBlockInfo ( block ) ; } | Remove the replica s meta information from the map that matches the input block s id and generation stamp |
33,546 | int size ( int namespaceId ) { NamespaceMap nm = getNamespaceMap ( namespaceId ) ; if ( nm == null ) { return 0 ; } return nm . size ( ) ; } | Get the size of the map for given namespace |
33,547 | ActiveFile getOngoingCreates ( int namespaceId , Block block ) { checkBlock ( block ) ; NamespaceMap nm = getNamespaceMap ( namespaceId ) ; if ( nm == null ) { return null ; } return nm . getOngoingCreates ( block ) ; } | for ongoing creates |
33,548 | public static void parseJobTasks ( String jobHistoryFile , JobHistory . JobInfo job , FileSystem fs ) throws IOException { JobHistory . parseHistoryFromFS ( jobHistoryFile , new JobTasksParseListener ( job ) , fs ) ; } | Populates a JobInfo object from the job s history log file . |
33,549 | private void validateAllPolicies ( Collection < PolicyInfo > all ) throws IOException , NumberFormatException { for ( PolicyInfo pinfo : all ) { Path srcPath = pinfo . getSrcPath ( ) ; if ( srcPath == null ) { throw new IOException ( "Unable to find srcPath in policy." ) ; } if ( pinfo . getProperty ( "replication" ) == null ) { throw new IOException ( "Unable to find replication in policy." + srcPath ) ; } int repl = Integer . parseInt ( pinfo . getProperty ( "replication" ) ) ; if ( pinfo . getProperty ( "modTimePeriod" ) == null ) { throw new IOException ( "Unable to find modTimePeriod in policy." + srcPath ) ; } long value = Long . parseLong ( pinfo . getProperty ( "modTimePeriod" ) ) ; List < PathInfo > dpaths = pinfo . getDestPaths ( ) ; if ( dpaths == null || dpaths . size ( ) == 0 ) { throw new IOException ( "Unable to find dest in policy." + srcPath ) ; } for ( PathInfo pp : dpaths ) { if ( pp . getPath ( ) == null ) { throw new IOException ( "Unable to find valid destPath in policy " + srcPath ) ; } if ( pp . getProperty ( "replication" ) == null ) { throw new IOException ( "Unable to find dest replication in policy." + srcPath ) ; } repl = Integer . parseInt ( pp . getProperty ( "replication" ) ) ; } } } | Validate a collection of policies |
33,550 | private void purgeDirectories ( FileSystem fs , Path root ) throws IOException { DirectoryTraversal traversal = DirectoryTraversal . directoryRetriever ( Arrays . asList ( root ) , fs , directoryTraversalThreads , directoryTraversalShuffle ) ; String prefix = root . toUri ( ) . getPath ( ) ; FileStatus dir ; while ( ( dir = traversal . next ( ) ) != DirectoryTraversal . FINISH_TOKEN ) { Path dirPath = dir . getPath ( ) ; if ( dirPath . toUri ( ) . getPath ( ) . endsWith ( RaidNode . HAR_SUFFIX ) ) { continue ; } String dirStr = dirPath . toUri ( ) . getPath ( ) ; if ( ! dirStr . startsWith ( prefix ) ) { continue ; } entriesProcessed . incrementAndGet ( ) ; String src = dirStr . replaceFirst ( prefix , "" ) ; if ( src . length ( ) == 0 ) continue ; Path srcPath = new Path ( src ) ; if ( ! fs . exists ( srcPath ) ) { performDelete ( fs , dirPath , true ) ; } } } | Traverse the parity destination directory removing directories that no longer existing in the source . |
33,551 | private static boolean existsBetterParityFile ( Codec codec , FileStatus srcStat , Configuration conf ) throws IOException { for ( Codec c : Codec . getCodecs ( ) ) { if ( c . priority > codec . priority ) { ParityFilePair ppair = ParityFilePair . getParityFile ( c , srcStat , conf ) ; if ( ppair != null ) { return true ; } } } return false ; } | Is there a parity file which has a codec with higher priority? |
33,552 | public int copyTo ( byte [ ] dest , int start ) throws BufferTooSmallException { if ( size > ( dest . length - start ) ) { throw new BufferTooSmallException ( "size is " + size + ", buffer availabe size is " + ( dest . length - start ) ) ; } if ( size > 0 ) { System . arraycopy ( bytes , 0 , dest , start , size ) ; } return size ; } | copy the byte array to the dest array and return the number of bytes copied . |
33,553 | public void setCapacity ( int new_cap ) { if ( new_cap != getCapacity ( ) ) { byte [ ] new_data = new byte [ new_cap ] ; if ( new_cap < size ) { size = new_cap ; } if ( size != 0 ) { System . arraycopy ( bytes , 0 , new_data , 0 , size ) ; } bytes = new_data ; } } | Change the capacity of the backing storage . The data is preserved . |
33,554 | public void set ( byte [ ] newData , int offset , int length ) { setSize ( 0 ) ; setSize ( length ) ; System . arraycopy ( newData , offset , bytes , 0 , size ) ; } | Set the value to a copy of the given byte range |
33,555 | protected static long setBits ( long num , int start , int len , long value ) { value = value & ( ( 1L << len ) - 1 ) ; long val_mask = value << start ; long zero_mask = ~ ( ( ( 1L << len ) - 1 ) << start ) ; return ( num & zero_mask ) | val_mask ; } | Sets specific bits of a specific number . |
33,556 | private int initFileDirTables ( ) { try { initFileDirTables ( root ) ; } catch ( IOException e ) { System . err . println ( e . getLocalizedMessage ( ) ) ; e . printStackTrace ( ) ; return - 1 ; } if ( dirs . isEmpty ( ) ) { System . err . println ( "The test space " + root + " is empty" ) ; return - 1 ; } if ( files . isEmpty ( ) ) { System . err . println ( "The test space " + root + " does not have any file" ) ; return - 1 ; } return 0 ; } | Create a table that contains all directories under root and another table that contains all files under root . |
33,557 | private void initFileDirTables ( Path path ) throws IOException { FileStatus [ ] stats = fs . listStatus ( path ) ; if ( stats != null ) { for ( FileStatus stat : stats ) { if ( stat . isDir ( ) ) { dirs . add ( stat . getPath ( ) . toString ( ) ) ; initFileDirTables ( stat . getPath ( ) ) ; } else { Path filePath = stat . getPath ( ) ; if ( filePath . getName ( ) . startsWith ( StructureGenerator . FILE_NAME_PREFIX ) ) { files . add ( filePath . toString ( ) ) ; } } } } } | Create a table that contains all directories under the specified path and another table that contains all files under the specified path and whose name starts with _file_ . |
33,558 | protected String [ ] getTaskDiagnosticsImpl ( TaskAttemptID taskId ) throws IOException { List < String > taskDiagnosticInfo = null ; JobID jobId = taskId . getJobID ( ) ; TaskID tipId = taskId . getTaskID ( ) ; JobInProgressTraits job = getJobInProgress ( jobId ) ; if ( job != null && job . inited ( ) ) { TaskInProgress tip = job . getTaskInProgress ( tipId ) ; if ( tip != null ) { taskDiagnosticInfo = tip . getDiagnosticInfo ( taskId ) ; } } return ( ( taskDiagnosticInfo == null ) ? EMPTY_TASK_DIAGNOSTICS : taskDiagnosticInfo . toArray ( new String [ taskDiagnosticInfo . size ( ) ] ) ) ; } | Get the diagnostics for a given task |
33,559 | public TaskInProgress getTip ( TaskID tipid ) { JobInProgressTraits job = getJobInProgress ( tipid . getJobID ( ) ) ; return ( job == null ? null : job . getTaskInProgress ( tipid ) ) ; } | Returns specified TaskInProgress or null . |
33,560 | public T pollFirst ( ) { if ( head == null ) { return null ; } T first = head . element ; this . remove ( first ) ; return first ; } | Remove and return first element on the linked list of all elements . |
33,561 | public void pollNToList ( int n , List < T > retList ) { if ( n >= size ) { pollAllToList ( retList ) ; } while ( n -- > 0 && head != null ) { T curr = head . element ; this . removeElem ( curr ) ; retList . add ( curr ) ; } shrinkIfNecessary ( ) ; } | Remove and return first n elements on the linked list of all elements . Put elements into the given list . |
33,562 | public static Path getTaskOutputPath ( JobConf conf , String name ) throws IOException { Path outputPath = getOutputPath ( conf ) ; if ( outputPath == null ) { throw new IOException ( "Undefined job output-path" ) ; } OutputCommitter committer = conf . getOutputCommitter ( ) ; Path workPath = outputPath ; TaskAttemptContext context = new TaskAttemptContext ( conf , TaskAttemptID . forName ( conf . get ( "mapred.task.id" ) ) ) ; if ( committer instanceof FileOutputCommitter ) { workPath = ( ( FileOutputCommitter ) committer ) . getWorkPath ( context , outputPath ) ; } return new Path ( workPath , name ) ; } | Helper function to create the task s temporary output directory and return the path to the task s output file . |
33,563 | public static String getUniqueName ( JobConf conf , String name ) { int partition = conf . getInt ( "mapred.task.partition" , - 1 ) ; if ( partition == - 1 ) { throw new IllegalArgumentException ( "This method can only be called from within a Job" ) ; } String taskType = ( conf . getBoolean ( "mapred.task.is.map" , true ) ) ? "m" : "r" ; NumberFormat numberFormat = NumberFormat . getInstance ( ) ; numberFormat . setMinimumIntegerDigits ( 5 ) ; numberFormat . setGroupingUsed ( false ) ; return name + "-" + taskType + "-" + numberFormat . format ( partition ) ; } | Helper function to generate a name that is unique for the task . |
33,564 | synchronized void openInfo ( ) throws IOException { if ( src == null && blocks == null ) { throw new IOException ( "No file provided to open" ) ; } LocatedBlocks newInfo = src != null ? getLocatedBlocks ( src , 0 , prefetchSize ) : blocks ; if ( newInfo == null ) { throw new FileNotFoundException ( "Cannot open filename " + src ) ; } if ( locatedBlocks != null && ! locatedBlocks . isUnderConstruction ( ) && ! newInfo . isUnderConstruction ( ) ) { Iterator < LocatedBlock > oldIter = locatedBlocks . getLocatedBlocksCopy ( ) . iterator ( ) ; Iterator < LocatedBlock > newIter = newInfo . getLocatedBlocks ( ) . iterator ( ) ; while ( oldIter . hasNext ( ) && newIter . hasNext ( ) ) { if ( ! oldIter . next ( ) . getBlock ( ) . equals ( newIter . next ( ) . getBlock ( ) ) ) { throw new IOException ( "Blocklist for " + src + " has changed!" ) ; } } } if ( newInfo . isUnderConstruction ( ) && newInfo . locatedBlockCount ( ) > 0 ) { LocatedBlock last = newInfo . get ( newInfo . locatedBlockCount ( ) - 1 ) ; if ( last . getLocations ( ) . length > 0 ) { try { Block newBlock = getBlockInfo ( last ) ; if ( newBlock != null ) { long newBlockSize = newBlock . getNumBytes ( ) ; newInfo . setLastBlockSize ( newBlock . getBlockId ( ) , newBlockSize ) ; } } catch ( IOException e ) { DFSClient . LOG . debug ( "DFSClient file " + src + " is being concurrently append to" + " but datanodes probably does not have block " + last . getBlock ( ) , e ) ; } } } this . locatedBlocks = new DFSLocatedBlocks ( newInfo , locatedBlockExpireTimeout ) ; this . currentNode = null ; if ( ! newInfo . isUnderConstruction ( ) ) { isCurrentBlockUnderConstruction = false ; } } | Grab the open - file info from namenode |
33,565 | private Block getBlockInfo ( LocatedBlock locatedblock ) throws IOException { if ( locatedblock == null || locatedblock . getLocations ( ) . length == 0 ) { return null ; } int replicaNotFoundCount = locatedblock . getLocations ( ) . length ; for ( DatanodeInfo datanode : locatedblock . getLocations ( ) ) { ProtocolProxy < ClientDatanodeProtocol > cdp = null ; try { cdp = DFSClient . createClientDNProtocolProxy ( datanode , dfsClient . conf , dfsClient . socketTimeout ) ; final Block newBlock ; if ( cdp . isMethodSupported ( "getBlockInfo" , int . class , Block . class ) ) { newBlock = cdp . getProxy ( ) . getBlockInfo ( dfsClient . namespaceId , locatedblock . getBlock ( ) ) ; } else { newBlock = cdp . getProxy ( ) . getBlockInfo ( locatedblock . getBlock ( ) ) ; } if ( newBlock == null ) { replicaNotFoundCount -- ; } else { return newBlock ; } } catch ( IOException ioe ) { if ( DFSClient . LOG . isDebugEnabled ( ) ) { DFSClient . LOG . debug ( "Failed to getBlockInfo from datanode " + datanode + " for block " + locatedblock . getBlock ( ) , ioe ) ; } } finally { if ( cdp != null ) { RPC . stopProxy ( cdp . getProxy ( ) ) ; } } } if ( replicaNotFoundCount == 0 ) { DFSClient . LOG . warn ( "Cannot get block info from any datanode having block " + locatedblock . getBlock ( ) + " for file " + src ) ; return null ; } throw new IOException ( "Cannot obtain block info for " + locatedblock ) ; } | Get block info from a datanode |
33,566 | private LocatedBlock getBlockAt ( long offset , boolean updatePosition , boolean throwWhenNotFound ) throws IOException { assert ( locatedBlocks != null ) : "locatedBlocks is null" ; locatedBlocks . blockLocationInfoExpiresIfNeeded ( ) ; LocatedBlock blk = locatedBlocks . getBlockContainingOffset ( offset ) ; if ( blk == null ) { LocatedBlocks newBlocks ; newBlocks = getLocatedBlocks ( src , offset , prefetchSize ) ; if ( newBlocks == null ) { if ( ! throwWhenNotFound ) { return null ; } throw new IOException ( "Could not find target position " + offset ) ; } locatedBlocks . insertRange ( newBlocks . getLocatedBlocks ( ) ) ; locatedBlocks . setFileLength ( newBlocks . getFileLength ( ) ) ; } blk = locatedBlocks . getBlockContainingOffset ( offset ) ; if ( blk == null ) { if ( ! throwWhenNotFound ) { return null ; } throw new IOException ( "Failed to determine location for block at " + "offset=" + offset ) ; } if ( updatePosition ) { this . pos = offset ; this . blockEnd = blk . getStartOffset ( ) + blk . getBlockSize ( ) - 1 ; this . currentBlock = blk . getBlock ( ) ; isCurrentBlockUnderConstruction = locatedBlocks . isUnderConstructionBlock ( this . currentBlock ) ; } return blk ; } | Get block at the specified position . Fetch it from the namenode if not cached . |
33,567 | private List < LocatedBlock > getBlockRange ( final long offset , final long length ) throws IOException { List < LocatedBlock > blockRange = new ArrayList < LocatedBlock > ( ) ; if ( length == 0 ) return blockRange ; int maxLoops = 10000 ; DFSLocatedBlocks locatedBlocks = this . locatedBlocks ; if ( locatedBlocks == null ) { throw new IOException ( "locatedBlocks is null" ) ; } locatedBlocks . blockLocationInfoExpiresIfNeeded ( ) ; long remaining = length ; long curOff = offset ; while ( remaining > 0 ) { if ( -- maxLoops < 0 ) { String msg = "Failed to getBlockRange at offset " + offset + ", length=" + length + ", curOff=" + curOff + ", remaining=" + remaining + ". Aborting..." ; DFSClient . LOG . warn ( msg ) ; throw new IOException ( msg ) ; } LocatedBlock blk = locatedBlocks . getBlockContainingOffset ( curOff ) ; if ( blk == null ) { LocatedBlocks newBlocks ; newBlocks = getLocatedBlocks ( src , curOff , remaining ) ; if ( newBlocks == null ) { throw new IOException ( "Could not get block locations for curOff=" + curOff + ", remaining=" + remaining + " (offset=" + offset + ")" ) ; } locatedBlocks . insertRange ( newBlocks . getLocatedBlocks ( ) ) ; continue ; } blockRange . add ( blk ) ; long bytesRead = blk . getStartOffset ( ) + blk . getBlockSize ( ) - curOff ; remaining -= bytesRead ; curOff += bytesRead ; } DFSClient . checkBlockRange ( blockRange , offset , length ) ; return blockRange ; } | Get blocks in the specified range . The locations of all blocks overlapping with the given segment of the file are retrieved . Fetch them from the namenode if not cached . |
33,568 | public synchronized void close ( ) throws IOException { try { if ( closed ) { return ; } dfsClient . checkOpen ( ) ; if ( blockReader != null ) { closeBlockReader ( blockReader , false ) ; blockReader = null ; } for ( BlockReaderLocalBase brl : localBlockReaders . values ( ) ) { try { brl . close ( ) ; } catch ( IOException ioe ) { DFSClient . LOG . warn ( "Error when closing local block reader" , ioe ) ; } } localBlockReaders = null ; super . close ( ) ; closed = true ; } finally { if ( dfsClient . blockLocationRenewal != null ) { dfsClient . blockLocationRenewal . remove ( this ) ; } } } | Close it down! |
33,569 | public synchronized int read ( byte buf [ ] , int off , int len ) throws IOException { dfsClient . checkOpen ( ) ; if ( closed ) { dfsClient . incReadExpCntToStats ( ) ; throw new IOException ( "Stream closed" ) ; } DFSClient . dfsInputStreamfailures . set ( 0 ) ; long start = System . currentTimeMillis ( ) ; if ( pos < getFileLength ( ) || ( pos == getFileLength ( ) && len == 0 ) ) { int retries = 2 ; while ( retries > 0 ) { try { if ( len == 0 ) { if ( blockEnd == - 1 || pos == getFileLength ( ) ) { currentNode = blockSeekTo ( pos , false ) ; if ( currentNode == null ) { return 0 ; } } else { throw new IOException ( "Try to read 0 bytes while current position is not the end of the file" ) ; } } else if ( pos > blockEnd || ( this . isCurrentBlockUnderConstruction && blockReader != null && blockReader . eos && blockReader . available ( ) == 0 ) ) { currentNode = blockSeekTo ( pos , true ) ; } int realLen = ( int ) Math . min ( ( long ) len , ( blockEnd - pos + 1L ) ) ; int result = readBuffer ( buf , off , realLen ) ; if ( result >= 0 ) { pos += result ; } else if ( len != 0 ) { throw new IOException ( "Unexpected EOS from the reader" ) ; } if ( dfsClient . stats != null && result != - 1 ) { dfsClient . stats . incrementBytesRead ( result ) ; } long timeval = System . currentTimeMillis ( ) - start ; dfsClient . metrics . incReadTime ( timeval ) ; dfsClient . metrics . incReadSize ( result ) ; dfsClient . metrics . incReadOps ( ) ; return ( result >= 0 ) ? result : 0 ; } catch ( InterruptedIOException iie ) { throw iie ; } catch ( ChecksumException ce ) { dfsClient . incReadExpCntToStats ( ) ; throw ce ; } catch ( IOException e ) { dfsClient . incReadExpCntToStats ( ) ; if ( retries == 1 ) { DFSClient . LOG . warn ( "DFS Read: " + StringUtils . 
stringifyException ( e ) ) ; } blockEnd = - 1 ; if ( currentNode != null ) { addToDeadNodes ( currentNode ) ; } if ( -- retries == 0 ) { if ( len != 0 ) { throw e ; } else { return 0 ; } } } } } return - 1 ; } | Read the entire buffer . |
33,570 | private void closeBlockReader ( BlockReader reader , boolean reuseConnection ) throws IOException { if ( reader . hasSentStatusCode ( ) ) { Socket oldSock = reader . takeSocket ( ) ; if ( dfsClient . getDataTransferProtocolVersion ( ) < DataTransferProtocol . READ_REUSE_CONNECTION_VERSION || ! reuseConnection ) { if ( oldSock != null ) { IOUtils . closeSocket ( oldSock ) ; } } else { socketCache . put ( oldSock ) ; } } reader . close ( ) ; } | Close the given BlockReader and cache its socket . |
33,571 | protected BlockReader getBlockReader ( int protocolVersion , int namespaceId , InetSocketAddress dnAddr , String file , long blockId , long generationStamp , long startOffset , long len , int bufferSize , boolean verifyChecksum , String clientName , long bytesToCheckReadSpeed , long minReadSpeedBps , boolean reuseConnection , FSClientReadProfilingData cliData , ReadOptions options ) throws IOException { IOException err = null ; boolean fromCache = true ; if ( protocolVersion < DataTransferProtocol . READ_REUSE_CONNECTION_VERSION || reuseConnection == false ) { Socket sock = dfsClient . socketFactory . createSocket ( ) ; sock . setTcpNoDelay ( true ) ; NetUtils . connect ( sock , dnAddr , dfsClient . socketTimeout ) ; sock . setSoTimeout ( dfsClient . socketTimeout ) ; BlockReader reader = BlockReader . newBlockReader ( protocolVersion , namespaceId , sock , src , blockId , generationStamp , startOffset , len , buffersize , verifyChecksum , clientName , bytesToCheckReadSpeed , minReadSpeedBps , false , cliData , options ) ; return reader ; } for ( int retries = 0 ; retries <= nCachedConnRetry && fromCache ; ++ retries ) { Socket sock = socketCache . get ( dnAddr ) ; if ( sock == null ) { fromCache = false ; sock = dfsClient . socketFactory . createSocket ( ) ; sock . setTcpNoDelay ( true ) ; NetUtils . connect ( sock , dnAddr , dfsClient . socketTimeout ) ; sock . setSoTimeout ( dfsClient . socketTimeout ) ; } try { BlockReader reader = BlockReader . newBlockReader ( protocolVersion , namespaceId , sock , src , blockId , generationStamp , startOffset , len , buffersize , verifyChecksum , clientName , bytesToCheckReadSpeed , minReadSpeedBps , true , cliData , options ) ; return reader ; } catch ( IOException ex ) { DFSClient . LOG . debug ( "Error making BlockReader. Closing stale " + sock , ex ) ; sock . close ( ) ; err = ex ; } } throw err ; } | Retrieve a BlockReader suitable for reading . 
This method will reuse the cached connection to the DN if appropriate . Otherwise it will create a new connection . |
33,572 | public int read ( long position , byte [ ] buffer , int offset , int length , ReadOptions options ) throws IOException { dfsClient . checkOpen ( ) ; if ( closed ) { throw new IOException ( "Stream closed" ) ; } DFSClient . dfsInputStreamfailures . set ( 0 ) ; long start = System . currentTimeMillis ( ) ; long filelen = getFileLength ( ) ; if ( ( position < 0 ) || ( position >= filelen ) ) { return - 1 ; } int realLen = length ; if ( ( position + length ) > filelen ) { realLen = ( int ) ( filelen - position ) ; } DFSReadProfilingData pData = DFSClient . getDFSReadProfilingData ( ) ; List < LocatedBlock > blockRange = getBlockRange ( position , realLen ) ; if ( ! tryPreadFromLocal ( blockRange , position , buffer , offset , length , realLen , start ) ) { int remaining = realLen ; for ( LocatedBlock blk : blockRange ) { long targetStart = position - blk . getStartOffset ( ) ; long bytesToRead = Math . min ( remaining , blk . getBlockSize ( ) - targetStart ) ; if ( dfsClient . allowParallelReads && dfsClient . parallelReadsThreadPool != null ) { fetchBlockByteRangeSpeculative ( blk , targetStart , targetStart + bytesToRead - 1 , buffer , offset , options ) ; } else { if ( pData != null ) { cliData = new FSClientReadProfilingData ( ) ; pData . addDFSClientReadProfilingData ( cliData ) ; cliData . startRead ( ) ; } fetchBlockByteRange ( blk , targetStart , targetStart + bytesToRead - 1 , buffer , offset , options ) ; if ( pData != null ) { cliData . endRead ( ) ; } } remaining -= bytesToRead ; position += bytesToRead ; offset += bytesToRead ; } assert remaining == 0 : "Wrong number of bytes read." ; } if ( dfsClient . stats != null ) { dfsClient . stats . incrementBytesRead ( realLen ) ; } long timeval = System . currentTimeMillis ( ) - start ; dfsClient . metrics . incPreadTime ( timeval ) ; dfsClient . metrics . incPreadSize ( realLen ) ; dfsClient . metrics . incPreadOps ( ) ; return realLen ; } | Read bytes starting from the specified position . |
33,573 | public List < ByteBuffer > readFullyScatterGather ( long position , int length ) throws IOException { if ( dfsClient . dataTransferVersion < DataTransferProtocol . SCATTERGATHER_VERSION ) { return super . readFullyScatterGather ( position , length ) ; } dfsClient . checkOpen ( ) ; if ( closed ) { throw new IOException ( "Stream closed" ) ; } DFSClient . dfsInputStreamfailures . set ( 0 ) ; long start = System . currentTimeMillis ( ) ; long filelen = getFileLength ( ) ; if ( ( position < 0 ) || ( position > filelen ) ) { String msg = " Invalid position " + position + ". File " + src + " is of size " + filelen ; DFSClient . LOG . warn ( msg ) ; throw new IOException ( msg ) ; } List < ByteBuffer > results = new LinkedList < ByteBuffer > ( ) ; int realLen = length ; if ( ( position + length ) > filelen ) { realLen = ( int ) ( filelen - position ) ; } List < LocatedBlock > blockRange = getBlockRange ( position , realLen ) ; int remaining = realLen ; for ( LocatedBlock blk : blockRange ) { long targetStart = position - blk . getStartOffset ( ) ; long bytesToRead = Math . min ( remaining , blk . getBlockSize ( ) - targetStart ) ; ByteBuffer bb = fetchBlockByteRangeScatterGather ( blk , targetStart , bytesToRead ) ; results . add ( bb ) ; remaining -= bytesToRead ; position += bytesToRead ; } assert remaining == 0 : "Wrong number of bytes read." ; if ( dfsClient . stats != null ) { dfsClient . stats . incrementBytesRead ( realLen ) ; } long timeval = System . currentTimeMillis ( ) - start ; dfsClient . metrics . incPreadTime ( timeval ) ; dfsClient . metrics . incPreadSize ( realLen ) ; dfsClient . metrics . incPreadOps ( ) ; return results ; } | Read bytes starting from the specified position . This is optimized for fast preads from an application with minimum of buffer copies . |
33,574 | public synchronized void seek ( long targetPos ) throws IOException { if ( targetPos > getFileLength ( ) ) { throw new IOException ( "Cannot seek after EOF" ) ; } boolean done = false ; if ( pos <= targetPos && targetPos <= blockEnd ) { int diff = ( int ) ( targetPos - pos ) ; if ( diff <= DFSClient . TCP_WINDOW_SIZE ) { try { pos += blockReader . skip ( diff ) ; if ( pos == targetPos ) { done = true ; } } catch ( IOException e ) { dfsClient . incReadExpCntToStats ( ) ; DFSClient . LOG . debug ( "Exception while seek to " + targetPos + " from " + currentBlock + " of " + src + " from " + currentNode + ": " + StringUtils . stringifyException ( e ) ) ; } } else { if ( DFSClient . LOG . isDebugEnabled ( ) ) { DFSClient . LOG . debug ( "seek() out of TCP buffer " + " block " + currentBlock + " current pos: " + pos + " target pos: " + targetPos ) ; } } } if ( ! done ) { pos = targetPos ; blockEnd = - 1 ; } } | Seek to a new arbitrary location |
33,575 | public synchronized boolean seekToNewSource ( long targetPos , boolean throwWhenNotFound ) throws IOException { boolean markedDead = deadNodes . containsKey ( currentNode ) ; addToDeadNodes ( currentNode ) ; DatanodeInfo oldNode = currentNode ; DatanodeInfo newNode = blockSeekTo ( targetPos , throwWhenNotFound ) ; if ( ! markedDead ) { deadNodes . remove ( oldNode ) ; } if ( ! oldNode . getStorageID ( ) . equals ( newNode . getStorageID ( ) ) ) { currentNode = newNode ; return true ; } else { return false ; } } | Seek to given position on a node other than the current node . If a node other than the current node is found then returns true . If another node could not be found then returns false . |
33,576 | private boolean isTaskRunning ( TaskStatus taskStatus ) { TaskStatus . State state = taskStatus . getRunState ( ) ; return ( state == State . RUNNING || state == State . UNASSIGNED || taskStatus . inTaskCleanupPhase ( ) ) ; } | Is the given task considered as running ? |
33,577 | public int countMapTasks ( ) { int mapCount = 0 ; for ( Iterator < TaskStatus > it = taskReports . iterator ( ) ; it . hasNext ( ) ; ) { TaskStatus ts = ( TaskStatus ) it . next ( ) ; if ( ts . getIsMap ( ) && isTaskRunning ( ts ) ) { mapCount ++ ; } } return mapCount ; } | Get the number of running map tasks . |
33,578 | public int countOccupiedMapSlots ( ) { int mapSlotsCount = 0 ; for ( Iterator < TaskStatus > it = taskReports . iterator ( ) ; it . hasNext ( ) ; ) { TaskStatus ts = ( TaskStatus ) it . next ( ) ; if ( ts . getIsMap ( ) && isTaskRunning ( ts ) ) { mapSlotsCount += ts . getNumSlots ( ) ; } } return mapSlotsCount ; } | Get the number of occupied map slots . |
33,579 | public int countReduceTasks ( ) { int reduceCount = 0 ; for ( Iterator < TaskStatus > it = taskReports . iterator ( ) ; it . hasNext ( ) ; ) { TaskStatus ts = ( TaskStatus ) it . next ( ) ; if ( ( ! ts . getIsMap ( ) ) && isTaskRunning ( ts ) ) { reduceCount ++ ; } } return reduceCount ; } | Get the number of running reduce tasks . |
33,580 | public int countOccupiedReduceSlots ( ) { int reduceSlotsCount = 0 ; for ( Iterator < TaskStatus > it = taskReports . iterator ( ) ; it . hasNext ( ) ; ) { TaskStatus ts = ( TaskStatus ) it . next ( ) ; if ( ( ! ts . getIsMap ( ) ) && isTaskRunning ( ts ) ) { reduceSlotsCount += ts . getNumSlots ( ) ; } } return reduceSlotsCount ; } | Get the number of occupied reduce slots . |
33,581 | public static BinaryRecordInput get ( DataInput inp ) { BinaryRecordInput bin = ( BinaryRecordInput ) bIn . get ( ) ; bin . setDataInput ( inp ) ; return bin ; } | Get a thread - local record input for the supplied DataInput . |
33,582 | public boolean canLeave ( ) { if ( reached == 0 ) { return false ; } if ( namesystem . now ( ) - reached < extension ) { reportStatus ( "STATE* Safe mode ON." , false ) ; return false ; } return ! needEnter ( ) ; } | Safe mode can be turned off iff the threshold is reached and the extension time have passed . |
33,583 | private void reportStatus ( String msg , boolean rightNow ) { long curTime = FSNamesystem . now ( ) ; if ( ! rightNow && ( curTime - lastStatusReport < 20 * 1000 ) ) { return ; } FLOG . info ( msg + " \n" + getTurnOffTip ( ) ) ; lastStatusReport = curTime ; } | Print status every 20 seconds . |
33,584 | private boolean isConsistent ( ) { if ( this . reached < 0 ) { return true ; } if ( namesystem . getTotalBlocks ( ) == - 1 && namesystem . getSafeBlocks ( ) == - 1 ) { return true ; } long activeBlocks = namesystem . getBlocksTotal ( ) - namesystem . getPendingDeletionBlocks ( ) ; return ( namesystem . getTotalBlocks ( ) == activeBlocks ) || ( namesystem . getSafeBlocks ( ) >= 0 && namesystem . getSafeBlocks ( ) <= namesystem . getTotalBlocks ( ) ) ; } | Checks consistency of the class state . This is costly and currently called only in assert . |
33,585 | public boolean nextKey ( ) throws IOException , InterruptedException { while ( hasMore && nextKeyIsSame ) { nextKeyValue ( ) ; } if ( hasMore ) { if ( inputKeyCounter != null ) { inputKeyCounter . increment ( 1 ) ; } return nextKeyValue ( ) ; } else { return false ; } } | Start processing next unique key . |
33,586 | static FSImageCompression createCompression ( Configuration conf , boolean forceUncompressed ) throws IOException { boolean compressImage = ( ! forceUncompressed ) && conf . getBoolean ( HdfsConstants . DFS_IMAGE_COMPRESS_KEY , HdfsConstants . DFS_IMAGE_COMPRESS_DEFAULT ) ; if ( ! compressImage ) { return createNoopCompression ( ) ; } String codecClassName = conf . get ( HdfsConstants . DFS_IMAGE_COMPRESSION_CODEC_KEY , HdfsConstants . DFS_IMAGE_COMPRESSION_CODEC_DEFAULT ) ; return createCompression ( conf , codecClassName ) ; } | Create a compression instance based on the user s configuration in the given Configuration object . |
33,587 | public static FSImageCompression readCompressionHeader ( Configuration conf , DataInputStream dis ) throws IOException { boolean isCompressed = dis . readBoolean ( ) ; if ( ! isCompressed ) { return createNoopCompression ( ) ; } else { String codecClassName = Text . readString ( dis ) ; return createCompression ( conf , codecClassName ) ; } } | Create a compression instance based on a header read from an input stream . |
33,588 | public InputStream unwrapInputStream ( InputStream is ) throws IOException { if ( imageCodec != null ) { return imageCodec . createInputStream ( is ) ; } else { return is ; } } | Unwrap a compressed input stream by wrapping it with a decompressor based on this codec . If this instance represents no compression simply return the input stream . |
33,589 | DataOutputStream writeHeaderAndWrapStream ( OutputStream os ) throws IOException { DataOutputStream dos = new DataOutputStream ( os ) ; dos . writeBoolean ( imageCodec != null ) ; if ( imageCodec != null ) { String codecClassName = imageCodec . getClass ( ) . getCanonicalName ( ) ; Text . writeString ( dos , codecClassName ) ; return new DataOutputStream ( imageCodec . createOutputStream ( os ) ) ; } else { return new DataOutputStream ( new BufferedOutputStream ( os ) ) ; } } | Write out a header to the given stream that indicates the chosen compression codec and return the same stream wrapped with that codec . If no codec is specified simply adds buffering to the stream so that the returned stream is always buffered . |
33,590 | public static TaskID downgrade ( org . apache . hadoop . mapreduce . TaskID old ) { if ( old instanceof TaskID ) { return ( TaskID ) old ; } else { return new TaskID ( JobID . downgrade ( old . getJobID ( ) ) , old . isMap ( ) , old . getId ( ) ) ; } } | Downgrade a new TaskID to an old one |
33,591 | private void quiesceIngestWithReprocess ( ) throws IOException { if ( ingest != null ) { LOG . info ( "Standby: Quiescing - quiescing ongoing ingest" ) ; quiesceIngest ( ) ; reprocessCurrentSegmentIfNeeded ( ingest . getIngestStatus ( ) ) ; } } | When ingest consumes the end of segment transaction it sets the state to not ingesting . This function ensures that the ingest thread exited . |
33,592 | private void quiesceIngest ( ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . STANDBY_QUIESCE_INGEST ) ; synchronized ( ingestStateLock ) { assertState ( StandbyIngestState . INGESTING_EDITS , StandbyIngestState . NOT_INGESTING ) ; ingest . quiesce ( ) ; } try { ingestThread . join ( ) ; currentIngestState = StandbyIngestState . NOT_INGESTING ; LOG . info ( "Standby: Quiesce - Ingest thread for segment: " + ingest . toString ( ) + " exited." ) ; } catch ( InterruptedException e ) { LOG . info ( "Standby: Quiesce - Ingest thread interrupted." ) ; throw new IOException ( e . getMessage ( ) ) ; } } | Quiesces the currently running ingest |
33,593 | private void instantiateIngest ( ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . STANDBY_INSTANTIATE_INGEST ) ; try { synchronized ( ingestStateLock ) { if ( checkIngestState ( ) ) { LOG . info ( "Standby: Ingest for txid: " + currentSegmentTxId + " is already running" ) ; return ; } assertState ( StandbyIngestState . NOT_INGESTING ) ; ingest = new Ingest ( this , fsnamesys , confg , currentSegmentTxId ) ; ingestThread = new Thread ( ingest ) ; ingestThread . setName ( "Ingest_for_" + currentSegmentTxId ) ; ingestThread . start ( ) ; currentIngestState = StandbyIngestState . INGESTING_EDITS ; } LOG . info ( "Standby: Instatiated ingest for txid: " + currentSegmentTxId ) ; } catch ( IOException e ) { setIngestFailures ( ingestFailures + 1 ) ; currentIngestState = StandbyIngestState . NOT_INGESTING ; throw e ; } } | Instantiates ingest thread for the current edits segment . |
33,594 | private void reprocessCurrentSegmentIfNeeded ( boolean status ) throws IOException { if ( status ) { return ; } assertState ( StandbyIngestState . NOT_INGESTING ) ; LOG . info ( "Standby: Quiesce - reprocessing edits segment starting at: " + currentSegmentTxId ) ; instantiateIngest ( ) ; quiesceIngest ( ) ; if ( ! ingest . getIngestStatus ( ) ) { String emsg = "Standby: Quiesce could not successfully ingest " + "transaction log starting at " + currentSegmentTxId ; LOG . warn ( emsg ) ; setIngestFailures ( ingestFailures + 1 ) ; throw new IOException ( emsg ) ; } } | Processes previously consumed edits segment if needed |
33,595 | void triggerCheckpoint ( boolean uncompressed ) throws IOException { String pref = "Standby: Checkpoint - " ; LOG . info ( pref + "triggering checkpoint manually" ) ; if ( uncompressed ) { throwIOException ( pref + " uncompressed option not supported" , null ) ; } if ( manualCheckpointLatch . getCount ( ) > 0 ) { throwIOException ( pref + "Another manual checkpoint is in progress" , null ) ; } manualCheckpointLatch = new CountDownLatch ( 2 ) ; lastCheckpointTime = delayedScheduledCheckpointTime = 0 ; try { manualCheckpointLatch . await ( ) ; } catch ( InterruptedException e ) { throwIOException ( pref + "interrupted when performing manual checkpoint" , e ) ; } if ( checkpointFailures > 0 ) { throwIOException ( pref + "manual checkpoint failed" , null ) ; } LOG . info ( pref + "manual checkpoint done" ) ; } | Trigger checkpoint . If there is an ongoing scheduled checkpoint this call will trigger a checkpoint immediately after . The method blocks until the checkpoint is done . |
33,596 | private void handleCheckpointFailure ( ) { setCheckpointFailures ( checkpointFailures + 1 ) ; if ( checkpointFailures > MAX_CHECKPOINT_FAILURES ) { LOG . fatal ( "Standby: Checkpointing - standby failed to checkpoint in " + checkpointFailures + " attempts. Aborting" ) ; } else { LOG . info ( "Sleeping " + CHECKPOINT_SLEEP_BEFORE_RETRY + " msecs before retry checkpoints..." ) ; try { Thread . sleep ( CHECKPOINT_SLEEP_BEFORE_RETRY ) ; return ; } catch ( InterruptedException ie ) { LOG . warn ( "Standby: Checkpointing - Thread interrupted" + " while sleeping before a retry." , ie ) ; } } FSEditLog . runtime . exit ( - 1 ) ; } | If checkpoint fails continuously we want to abort the standby . We want to avoid the situation in which the standby continuously rolls edit log on the primary without finalizing checkpoint . |
33,597 | private void uploadImage ( long txid ) throws IOException { final long start = AvatarNode . now ( ) ; LOG . info ( "Standby: Checkpointing - Upload fsimage to remote namenode." ) ; checkpointStatus ( "Image upload started" ) ; imageUploader = new ImageUploader ( txid ) ; imageUploader . start ( ) ; while ( running && ! imageUploader . done && AvatarNode . now ( ) - start < MAX_CHECKPOINT_UPLOAD_TIMEOUT ) { try { imageUploader . join ( 3000 ) ; } catch ( InterruptedException ie ) { LOG . error ( "Reveived interruption when uploading image for txid: " + txid ) ; Thread . currentThread ( ) . interrupt ( ) ; throw ( IOException ) new InterruptedIOException ( ) . initCause ( ie ) ; } } if ( ! running || ! imageUploader . succeeded ) { InjectionHandler . processEvent ( InjectionEvent . STANDBY_UPLOAD_FAIL ) ; throw new IOException ( "Standby: Checkpointing - Image upload failed (time= " + ( AvatarNode . now ( ) - start ) + " ms)." , imageUploader . error ) ; } imageUploader = null ; LOG . info ( "Standby: Checkpointing - Upload fsimage to remote namenode DONE." ) ; checkpointStatus ( "Image upload completed" ) ; } | Creates image upload thread . |
/**
 * Copies the new fsimage for the given transaction id into the NameNode,
 * delegating the transfer to the shared {@link TransferFsImage} utility.
 *
 * @param txid transaction id of the image to upload
 * @throws IOException if the transfer fails
 */
private void putFSImage(long txid) throws IOException {
  TransferFsImage.uploadImageFromStorage(fsName, machineName, infoPort,
      fsImage.storage, txid);
}
33,599 | private void checkImageValidation ( ) throws IOException { try { imageValidator . join ( ) ; } catch ( InterruptedException ie ) { throw ( IOException ) new InterruptedIOException ( ) . initCause ( ie ) ; } if ( ! imageValidator . succeeded ) { throw new IOException ( "Image file validation failed" , imageValidator . error ) ; } } | Checks the status of image validation during checkpoint . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.