idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,200
/**
 * Sets the URL for this session and notifies the ClusterManager of the
 * updated session info.
 *
 * @param url the new session URL
 * @throws IOException if the driver has previously recorded a failure
 */
public void setUrl(String url) throws IOException {
  if (failException != null) {
    throw failException;
  }
  sessionInfo.url = url;
  SessionInfo updatedInfo = new SessionInfo(sessionInfo);
  cmNotifier.addCall(
      new ClusterManagerService.sessionUpdateInfo_args(sessionId, updatedInfo));
}
Set the URL for this session in the ClusterManager
32,201
/**
 * Stops a session acquired by a remote JobTracker, marking it as timed
 * out on the ClusterManager.
 *
 * @param remoteId id of the remote session to stop
 */
public void stopRemoteSession(String remoteId) {
  ClusterManagerService.sessionEnd_args endCall =
      new ClusterManagerService.sessionEnd_args(remoteId, SessionStatus.TIMED_OUT);
  cmNotifier.addCall(endCall);
}
Stops session acquired by remote JT
32,202
/**
 * Stops the SessionDriver. Sends a session-end message to the
 * ClusterManager — preceded by a node feedback report when reportList is
 * non-empty — then shuts down the notifier, the server and the
 * incoming-call executor.
 *
 * @param status        final status to report for the session
 * @param resourceTypes resource types the feedback report covers
 * @param reportList    node usage reports, may be null or empty
 */
public void stop(SessionStatus status, List<ResourceType> resourceTypes,
    List<NodeUsageReport> reportList) {
  LOG.info("Stopping session driver");
  running = false;
  // Drop any queued calls; only the feedback + end calls below matter now.
  cmNotifier.clearCalls();
  boolean haveReports = (reportList != null) && !reportList.isEmpty();
  if (haveReports) {
    cmNotifier.addCall(new ClusterManagerService.nodeFeedback_args(
        sessionId, resourceTypes, reportList));
  }
  cmNotifier.addCall(
      new ClusterManagerService.sessionEnd_args(sessionId, status));
  cmNotifier.doShutdown();
  server.stop();
  incomingCallExecutor.interrupt();
}
Stop the SessionDriver . This sends the message to the ClusterManager indicating that the session has ended . If reportList is not null or empty it will send the report prior to closing the session .
32,203
/**
 * Joins the underlying threads of the SessionDriver, logging a warning
 * when the notifier takes at least SESSION_DRIVER_WAIT_INTERVAL to die.
 *
 * @throws InterruptedException if interrupted while waiting
 */
public void join() throws InterruptedException {
  serverThread.join();
  long waitStart = System.currentTimeMillis();
  cmNotifier.join(SESSION_DRIVER_WAIT_INTERVAL);
  long waited = System.currentTimeMillis() - waitStart;
  if (waited >= SESSION_DRIVER_WAIT_INTERVAL) {
    LOG.warn("Taking more than " + SESSION_DRIVER_WAIT_INTERVAL
        + " for cmNotifier to die");
  }
  incomingCallExecutor.join();
}
Join the underlying threads of SessionDriver
32,204
/**
 * Requests the given resources from the ClusterManager.
 *
 * @param wanted resources to request
 * @throws IOException if the driver has previously recorded a failure
 */
public void requestResources(List<ResourceRequest> wanted) throws IOException {
  if (failException != null) {
    throw failException;
  }
  cmNotifier.addCall(
      new ClusterManagerService.requestResource_args(sessionId, wanted));
}
Request needed resources from the ClusterManager
32,205
/**
 * Releases resources that are no longer used, identified by their ids.
 *
 * @param released resource requests to release
 * @throws IOException if the driver has previously recorded a failure
 */
public void releaseResources(List<ResourceRequest> released) throws IOException {
  if (failException != null) {
    throw failException;
  }
  List<Integer> idsToRelease = new ArrayList<Integer>();
  for (ResourceRequest request : released) {
    idsToRelease.add(request.getId());
  }
  cmNotifier.addCall(
      new ClusterManagerService.releaseResource_args(sessionId, idsToRelease));
}
Release the resources that are no longer used
32,206
/**
 * Creates an instance of the named class via its declared constructor
 * matching {@code argArray}, loaded through the thread context class
 * loader. Private constructors are made accessible.
 *
 * NOTE(review): newInstance() is invoked with no arguments, so this
 * presumably only works when argArray describes a no-arg constructor —
 * confirm against the declaration of argArray.
 *
 * @param className fully qualified name of the class to instantiate
 * @return a new instance of the class
 * @throws RuntimeException wrapping any reflective failure
 */
public static Object createInstance(String className) {
  try {
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    Class<?> theFilterClass = Class.forName(className, true, classLoader);
    // Wildcard-typed constructor instead of the raw Constructor type,
    // and the pointless mutable retv local is gone.
    Constructor<?> meth = theFilterClass.getDeclaredConstructor(argArray);
    meth.setAccessible(true);
    return meth.newInstance();
  } catch (Exception e) {
    throw new RuntimeException(e);
  }
}
Create an instance of the given class
32,207
/**
 * Sets the bit at the given position to true.
 *
 * @param bits the bit words
 * @param pos  bit position to set
 * @throws IndexOutOfBoundsException if pos lies beyond the array
 */
public static void set(long[] bits, int pos) {
  final int word = pos >> LONG_SHIFT;
  if (word >= bits.length) {
    throw new IndexOutOfBoundsException();
  }
  // Java's << masks the shift count to the low 6 bits for longs, so no
  // explicit (pos % 64) is needed here.
  bits[word] |= 1L << pos;
}
Set the bit for the given position to true .
32,208
/**
 * Sets the bit at the given position to false.
 *
 * @param bits the bit words
 * @param pos  bit position to clear
 * @throws IndexOutOfBoundsException if pos lies beyond the array
 */
public static void clear(long[] bits, int pos) {
  final int word = pos >> LONG_SHIFT;
  if (word >= bits.length) {
    throw new IndexOutOfBoundsException();
  }
  // << masks the shift count to 6 bits for longs, so pos need not be
  // reduced modulo 64 explicitly.
  bits[word] &= ~(1L << pos);
}
Set the bit for the given position to false .
32,209
/**
 * Gets the bit at the given position.
 *
 * @param bits the bit words
 * @param pos  bit position to read
 * @return true if the bit is set; false when unset or out of range
 *         (unlike set/clear, out-of-range reads do not throw)
 */
public static boolean get(long[] bits, int pos) {
  final int word = pos >> LONG_SHIFT;
  if (word >= bits.length) {
    return false;
  }
  long mask = 1L << pos;
  return (bits[word] & mask) != 0;
}
Gets the bit for the given position .
32,210
/**
 * Counts the number of bits set to 1 across the whole array.
 *
 * @param bits the bit words
 * @return the total population count
 */
public static int cardinality(long[] bits) {
  int card = 0;
  for (int i = bits.length - 1; i >= 0; i--) {
    // Long.bitCount is an intrinsified popcount; it replaces the
    // previous hand-rolled bit-twiddling with identical results.
    card += Long.bitCount(bits[i]);
  }
  return card;
}
Counts the number of bits set to 1 .
32,211
/**
 * Ensures that a majority of upload channels is still usable at each
 * operation; when the disabled channels reach a majority, throws a
 * QuorumException describing the successes and failures.
 *
 * @throws IOException (QuorumException) when quorum is lost
 */
void checkState() throws IOException {
  int majority = getMajoritySize();
  int disabledCount = 0;
  for (HttpImageUploadChannel channel : uploadChannels) {
    if (channel.isDisabled()) {
      disabledCount++;
    }
  }
  if (disabledCount < majority) {
    return;
  }
  Map<HttpImageUploadChannel, Void> successes =
      new HashMap<HttpImageUploadChannel, Void>();
  Map<HttpImageUploadChannel, Throwable> exceptions =
      new HashMap<HttpImageUploadChannel, Throwable>();
  for (HttpImageUploadChannel channel : uploadChannels) {
    if (channel.isDisabled()) {
      exceptions.put(channel, channel.getErrorStatus());
    } else {
      successes.put(channel, null);
    }
  }
  throw QuorumException.create("Failed when uploading", successes, exceptions);
}
At each operation this function is used to ensure that we still have a majority of successful channels to which we can write .
32,212
/**
 * Sends the current buffer to every upload channel; unless this is the
 * closing flush, allocates a fresh buffer sized 20% above the flush
 * threshold to reduce regrowth.
 *
 * @param close true when this is the final flush before close
 */
private void flushBuffer(boolean close) {
  for (HttpImageUploadChannel channel : uploadChannels) {
    channel.send(buffer);
  }
  if (close) {
    return;
  }
  buffer = new ByteArrayOutputStream((int) (1.2 * flushSize));
}
Flushes the buffer to the upload channels and allocates a new buffer .
32,213
/**
 * Lazily creates and returns a filesystem handle rooted at the system
 * directory. Needed to prepare jobs for submission to MapReduce.
 *
 * @return the cached or newly created filesystem
 * @throws IOException if the filesystem cannot be obtained
 */
public synchronized FileSystem getFs() throws IOException {
  if (this.fs == null) {
    this.fs = getSystemDir().getFileSystem(getConf());
  }
  return this.fs;
}
Get a filesystem handle . We need this to prepare jobs for submission to the MapReduce system .
32,214
/**
 * Copies a file from its remote filesystem into the JobTracker's
 * filesystem and returns the (qualified) destination path.
 *
 * When md5 is non-null the file goes into a shared cache directory under
 * a digest-derived name: a cache hit returns the existing copy
 * (occasionally refreshing its access time), otherwise the file is
 * uploaded under a temporary name and renamed into place so concurrent
 * submitters never observe a partial file.
 *
 * @param jtFs         the JobTracker's filesystem (copy destination)
 * @param parentDir    destination directory
 * @param originalPath source path on the remote filesystem
 * @param job          configuration used to resolve filesystems
 * @param replication  replication factor for the uploaded copy
 * @param md5          digest of the file for shared caching, or null
 * @return the path of the copy on jtFs, or originalPath itself when
 *         source and destination filesystems are the same
 * @throws IOException if the upload or rename fails
 */
private Path copyRemoteFiles(FileSystem jtFs, Path parentDir,
    Path originalPath, JobConf job, short replication, String md5)
    throws IOException {
  FileSystem remoteFs = null;
  remoteFs = originalPath.getFileSystem(job);
  // Same filesystem: no copy needed.
  if (compareFs(remoteFs, jtFs)) {
    return originalPath;
  }
  if (md5 != null) {
    Path basePath = parentDir;
    Path realPath = new Path(basePath, md5 + "_" + originalPath.getName());
    Path qualifiedRealPath = realPath.makeQualified(jtFs);
    if (filesInCache.contains(qualifiedRealPath)) {
      // Cache hit. NOTE(review): r.nextLong() % 10 == 0 holds with
      // roughly 1/10 probability — presumably a throttle so only a
      // fraction of hits refresh the access time; confirm intent.
      if (r.nextLong() % 10 == 0) {
        try {
          jtFs.setTimes(realPath, -1, System.currentTimeMillis());
        } catch (RemoteException e) {
          LOG.warn("Error in setTimes", e);
        }
      }
      return qualifiedRealPath;
    }
    // Upload under a fresh temporary name, then rename into place.
    Path newPath;
    do {
      newPath = new Path(basePath, "tmp_" + originalPath.getName()
          + r.nextLong());
    } while (jtFs.exists(newPath));
    FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, job);
    jtFs.setReplication(newPath, replication);
    jtFs.setPermission(newPath, new FsPermission(JOB_DIR_PERMISSION));
    LOG.info("Uploading new shared jar: " + realPath.toString());
    if (!jtFs.rename(newPath, realPath)) {
      // A failed rename is acceptable if another submitter won the race
      // and the shared copy already exists.
      if (!jtFs.exists(realPath))
        throw new IOException("Unable to upload or find shared jar: "
            + realPath.toString());
    }
    filesInCache.add(qualifiedRealPath);
    return qualifiedRealPath;
  }
  // Non-shared case: plain copy into the job's own directory.
  Path newPath = new Path(parentDir, originalPath.getName());
  FileUtil.copy(remoteFs, originalPath, jtFs, newPath, false, job);
  jtFs.setReplication(newPath, replication);
  return jtFs.makeQualified(newPath);
}
Copies a file from a remote filesystem into the JobTracker's filesystem and returns the path it was copied to .
32,215
/**
 * Creates symlinks in the current working directory for the files and
 * archives listed under "tmpfiles"/"tmparchives" (the LocalJobRunner
 * path), then fills in user-related job configuration.
 *
 * @param job the job configuration to read file lists from and update
 * @throws IOException if user configuration fails
 */
private void symLinkAndConfigureFiles(JobConf job) throws IOException {
  if (!(job.getBoolean("mapred.used.genericoptionsparser", false))) {
    LOG.warn("Use GenericOptionsParser for parsing the arguments. "
        + "Applications should implement Tool for the same.");
  }
  // Comma-separated lists of files/archives to link locally.
  String files = job.get("tmpfiles");
  String archives = job.get("tmparchives");
  List<String> filesToSymLink = new ArrayList<String>();
  splitAndAdd(files, filesToSymLink);
  splitAndAdd(archives, filesToSymLink);
  for (String file : filesToSymLink) {
    String target = new Path(file).toUri().getPath();
    String basename = new File(target).getName();
    String linkName = new File(".").getAbsolutePath() + File.separator
        + basename;
    File toLink = new File(linkName);
    // Replace any stale link of the same name from a previous run.
    if (toLink.exists()) {
      LOG.info("Symlink " + linkName + " already exists. Delete it.");
      toLink.delete();
    }
    int ret = FileUtil.symLink(target, linkName);
    LOG.info("Creating symlink " + linkName + " -> " + target + " returns "
        + ret + ".");
  }
  String originalJar = job.getJar();
  if (originalJar != null) {
    // Default the job name to the jar name when none was set.
    if ("".equals(job.getJobName())) {
      job.setJobName(new Path(originalJar).getName());
    }
  }
  configureUserName(job);
}
Create symlinks for the files needed for the jobs in current directory
32,216
/**
 * Stores the submitting user's name (and primary group, when available)
 * in the job configuration so job files can later be accessed under that
 * identity; also defaults the working directory when unset.
 *
 * @param job the job configuration to update
 * @throws IOException if the user/group info cannot be obtained
 */
private void configureUserName(JobConf job) throws IOException {
  UnixUserGroupInformation ugi = getUGI(job);
  job.setUser(ugi.getUserName());
  String[] groups = ugi.getGroupNames();
  if (groups != null && groups.length > 0) {
    job.set("group.name", groups[0]);
  }
  if (job.getWorkingDirectory() == null) {
    job.setWorkingDirectory(fs.getWorkingDirectory());
  }
}
Set this user's id in the job configuration so later job files can be accessed using this user's id .
32,217
/**
 * Submits the job described by the given configuration file to the
 * MapReduce system.
 *
 * @param jobFile path to the job configuration file
 * @return a handle to the running job
 */
public RunningJob submitJob(String jobFile)
    throws FileNotFoundException, InvalidJobConfException, IOException {
  return submitJob(new JobConf(jobFile));
}
Submit a job to the MR system .
32,218
/**
 * Internal method for submitting jobs to the system: stages job files,
 * validates output specs, computes splits, records the splits and
 * configuration (on disk for an out-of-process JobTracker, or in shared
 * caches for an in-process one), writes job.xml and submits.
 *
 * @param job the job configuration
 * @return a handle to the launched job
 * @throws IOException if staging fails or the job cannot be launched
 */
public RunningJob submitJobInternal(JobConf job) throws FileNotFoundException,
    ClassNotFoundException, InterruptedException, IOException {
  boolean shared = job.getBoolean("mapred.cache.shared.enabled", false);
  JobID jobId = jobSubmitClient.getNewJobId();
  Path submitJobDir = new Path(getSystemDir(), jobId.toString());
  Path sharedFilesDir = new Path(getSystemDir(), jobSubmitClient.CAR);
  Path submitSplitFile = new Path(submitJobDir, "job.split");
  // Ensure the cached fs field is initialized before use below.
  getFs();
  if (jobSubmitClient instanceof LocalJobRunner) {
    symLinkAndConfigureFiles(job);
  } else {
    copyAndConfigureFiles(job, (shared) ? sharedFilesDir : submitJobDir, shared);
  }
  Path submitJobFile = new Path(submitJobDir, "job.xml");
  int reduces = job.getNumReduceTasks();
  JobContext context = new JobContext(job, jobId);
  // Validate output specs with whichever API (new/old) the job uses.
  if (reduces == 0 ? job.getUseNewMapper() : job.getUseNewReducer()) {
    org.apache.hadoop.mapreduce.OutputFormat<?, ?> output =
        ReflectionUtils.newInstance(context.getOutputFormatClass(), job);
    output.checkOutputSpecs(context);
  } else {
    job.getOutputFormat().checkOutputSpecs(fs, job);
  }
  LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile));
  List<RawSplit> maps;
  if (job.getUseNewMapper()) {
    maps = computeNewSplits(context);
  } else {
    maps = computeOldSplits(job);
  }
  job.setNumMapTasks(maps.size());
  if (!isJobTrackerInProc) {
    // Out-of-process JobTracker: persist the splits and point the job at
    // the split file.
    JobConf conf = null;
    if (job.getUseNewMapper()) {
      conf = context.getJobConf();
    } else {
      conf = job;
    }
    writeComputedSplits(conf, maps, submitSplitFile);
    job.set("mapred.job.split.file", submitSplitFile.toString());
  } else {
    // In-process JobTracker: hand splits and conf over via shared
    // caches, refusing duplicate submissions of the same job id.
    synchronized (JobClient.jobSplitCache) {
      if (JobClient.jobSplitCache.containsKey(jobId)) {
        throw new IOException("Job split already cached " + jobId);
      }
      JobClient.jobSplitCache.put(jobId, maps);
    }
    synchronized (JobClient.jobConfCache) {
      if (JobClient.jobConfCache.containsKey(jobId)) {
        throw new IOException("Job conf already cached " + jobId);
      }
      jobConfCache.put(jobId, job);
    }
  }
  // Write out the job configuration.
  FSDataOutputStream out = FileSystem.create(fs, submitJobFile,
      new FsPermission(JOB_FILE_PERMISSION));
  try {
    job.writeXml(out);
  } finally {
    out.close();
  }
  JobStatus status = jobSubmitClient.submitJob(jobId);
  if (status != null) {
    return new NetworkedJob(status);
  } else {
    throw new IOException("Could not launch job");
  }
}
Internal method for submitting jobs to the system .
32,219
/**
 * Validates the total task count (maps + reduces) against the
 * mapred.jobtracker.maxtasks.per.job limit; a limit of -1 disables the
 * check. The JobTracker applies this limit against the sum of mappers
 * and reducers.
 *
 * @param splits      number of map tasks
 * @param reduceTasks number of reduce tasks
 * @param conf        configuration holding the limit
 * @throws IOException if the limit is exceeded
 */
private void validateNumberOfTasks(int splits, int reduceTasks, JobConf conf)
    throws IOException {
  int maxTasks = conf.getInt("mapred.jobtracker.maxtasks.per.job", -1);
  if (maxTasks == -1) {
    return;
  }
  int totalTasks = splits + reduceTasks;
  if (totalTasks > maxTasks) {
    throw new IOException("The number of tasks for this job " + totalTasks
        + " exceeds the configured limit " + maxTasks);
  }
}
The JobTracker applies this limit against the sum of mappers and reducers .
32,220
/**
 * Reads a split file into an array of raw splits, validating the header
 * bytes and format version first.
 *
 * @param in stream positioned at the start of the split file
 * @return the deserialized splits
 * @throws IOException on a bad header or unsupported version
 */
static RawSplit[] readSplitFile(DataInput in) throws IOException {
  byte[] header = new byte[SPLIT_FILE_HEADER.length];
  in.readFully(header);
  if (!Arrays.equals(SPLIT_FILE_HEADER, header)) {
    throw new IOException("Invalid header on split file");
  }
  int vers = WritableUtils.readVInt(in);
  if (vers != CURRENT_SPLIT_FILE_VERSION) {
    throw new IOException("Unsupported split version " + vers);
  }
  int count = WritableUtils.readVInt(in);
  RawSplit[] splits = new RawSplit[count];
  for (int i = 0; i < count; i++) {
    splits[i] = new RawSplit();
    splits[i].readFields(in);
  }
  return splits;
}
Read a splits file into a list of raw splits
32,221
/**
 * Displays the information about a job's tasks of a particular type
 * ("map", "reduce", "setup", "cleanup") that are in a particular state
 * ("pending", "running", "completed", "failed", "killed"). Unrecognized
 * types produce no output.
 *
 * @param jobId the job to inspect
 * @param type  task type selector
 * @param state task state selector
 */
public void displayTasks(JobID jobId, String type, String state)
    throws IOException {
  TaskReport[] reports = new TaskReport[0];
  if (type.equals("map")) {
    reports = getMapTaskReports(jobId);
  } else if (type.equals("reduce")) {
    reports = getReduceTaskReports(jobId);
  } else if (type.equals("setup")) {
    reports = getSetupTaskReports(jobId);
  } else if (type.equals("cleanup")) {
    reports = getCleanupTaskReports(jobId);
  }
  for (TaskReport report : reports) {
    TIPStatus current = report.getCurrentStatus();
    boolean matches =
        (state.equals("pending") && current == TIPStatus.PENDING)
            || (state.equals("running") && current == TIPStatus.RUNNING)
            || (state.equals("completed") && current == TIPStatus.COMPLETE)
            || (state.equals("failed") && current == TIPStatus.FAILED)
            || (state.equals("killed") && current == TIPStatus.KILLED);
    if (matches) {
      printTaskAttempts(report);
    }
  }
}
Display the information about a job's tasks of a particular type and in a particular state
32,222
/**
 * Utility that submits a job and then polls for progress until the job
 * completes. Re-asserts the interrupt flag and returns the job handle if
 * monitoring is interrupted.
 *
 * @param job the job configuration
 * @return a handle to the submitted job
 * @throws IOException if the job fails
 */
public static RunningJob runJob(JobConf job) throws IOException {
  JobClient client = new JobClient(job);
  RunningJob running = client.submitJob(job);
  try {
    if (!client.monitorAndPrintJob(job, running)) {
      throw new IOException("Job failed!");
    }
  } catch (InterruptedException ie) {
    Thread.currentThread().interrupt();
  }
  return running;
}
Utility that submits a job then polls for progress until the job is complete .
32,223
/**
 * Monitors a job and prints status in real time as progress is made and
 * tasks fail, polling until the job completes.
 *
 * Each iteration: logs map/reduce progress when it changed, downloads
 * profiles for completed tasks inside the configured profile ranges, and
 * prints events and task logs according to the task output filter.
 *
 * @param conf configuration supplying the output filter and profiling
 *             settings
 * @param job  the running job to monitor
 * @return true if the job succeeded
 * @throws IOException          on communication failures
 * @throws InterruptedException if the polling sleep is interrupted
 */
public boolean monitorAndPrintJob(JobConf conf, RunningJob job)
    throws IOException, InterruptedException {
  String lastReport = null;
  TaskStatusFilter filter;
  filter = getTaskOutputFilter(conf);
  JobID jobId = job.getID();
  LOG.info("Running job: " + jobId);
  int eventCounter = 0;
  boolean profiling = conf.getProfileEnabled();
  Configuration.IntegerRanges mapRanges = conf.getProfileTaskRange(true);
  Configuration.IntegerRanges reduceRanges = conf.getProfileTaskRange(false);
  while (!job.isComplete()) {
    Thread.sleep(MAX_JOBPROFILE_AGE);
    String report = (" map " + StringUtils.formatPercent(job.mapProgress(), 0)
        + " reduce " + StringUtils.formatPercent(job.reduceProgress(), 0));
    // Only log when the progress line actually changed.
    if (!report.equals(lastReport)) {
      LOG.info(report);
      lastReport = report;
    }
    TaskCompletionEvent[] events = job.getTaskCompletionEvents(eventCounter);
    eventCounter += events.length;
    for (TaskCompletionEvent event : events) {
      TaskCompletionEvent.Status status = event.getTaskStatus();
      // Download the profile when profiling is on, the task finished
      // (succeeded or failed) and its id falls in the configured range.
      if (profiling
          && (status == TaskCompletionEvent.Status.SUCCEEDED
              || status == TaskCompletionEvent.Status.FAILED)
          && (event.isMap ? mapRanges : reduceRanges)
              .isIncluded(event.idWithinJob())) {
        downloadProfile(event);
      }
      switch (filter) {
      case NONE:
        break;
      case SUCCEEDED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.SUCCEEDED) {
          LOG.info(event.toString());
          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        }
        break;
      case FAILED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.FAILED) {
          LOG.info(event.toString());
          // Print any diagnostics the JobTracker holds for the failed
          // attempt before dumping its logs.
          TaskAttemptID taskId = event.getTaskAttemptId();
          String[] taskDiagnostics = jobSubmitClient.getTaskDiagnostics(taskId);
          if (taskDiagnostics != null) {
            for (String diagnostics : taskDiagnostics) {
              System.err.println(diagnostics);
            }
          }
          displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        }
        break;
      case KILLED:
        if (event.getTaskStatus() == TaskCompletionEvent.Status.KILLED) {
          LOG.info(event.toString());
        }
        break;
      case ALL:
        LOG.info(event.toString());
        displayTaskLogs(event.getTaskAttemptId(), event.getTaskTrackerHttp());
        break;
      }
    }
  }
  LOG.info("Job complete: " + jobId);
  Counters counters = job.getCounters();
  if (counters != null) {
    counters.log(LOG);
  }
  return job.isSuccessful();
}
Monitor a job and print status in real - time as progress is made and tasks fail .
32,224
/**
 * Lists the task completion events for the given job starting at
 * fromEventId, printing each event's status, attempt id and log URL.
 *
 * @param jobId       the job whose events to list
 * @param fromEventId first event index to fetch
 * @param numEvents   maximum number of events to fetch
 */
private void listEvents(JobID jobId, int fromEventId, int numEvents)
    throws IOException {
  TaskCompletionEvent[] events =
      jobSubmitClient.getTaskCompletionEvents(jobId, fromEventId, numEvents);
  System.out.println("Task completion events for " + jobId);
  System.out.println("Number of events (from " + fromEventId + ") are: "
      + events.length);
  for (TaskCompletionEvent event : events) {
    String logUrl = getTaskLogURL(event.getTaskAttemptId(),
        event.getTaskTrackerHttp());
    System.out.println(event.getTaskStatus() + " " + event.getTaskAttemptId()
        + " " + logUrl);
  }
}
List the events for the given job
32,225
/**
 * Dumps the list of currently running jobs.
 */
private void listJobs() throws IOException {
  JobStatus[] jobs = jobsToComplete();
  if (jobs == null) {
    jobs = new JobStatus[0];
  }
  System.out.printf("%d jobs currently running\n", jobs.length);
  displayJobList(jobs);
}
Dump a list of currently running jobs
32,226
/**
 * Dumps a list of all submitted jobs together with the legend mapping
 * the numeric job states to names.
 */
private void listAllJobs() throws IOException {
  JobStatus[] jobs = getAllJobs();
  if (jobs == null) {
    jobs = new JobStatus[0];
  }
  System.out.printf("%d jobs submitted\n", jobs.length);
  // Fixed user-facing typo: "Succeded" -> "Succeeded".
  System.out.printf("States are:\n\tRunning : 1\tSucceeded : 2"
      + "\tFailed : 3\tPrep : 4\n");
  displayJobList(jobs);
}
Dump a list of all jobs submitted .
32,227
/**
 * Displays the names of the active task trackers in the cluster, one per
 * line.
 */
private void listActiveTrackers() throws IOException {
  ClusterStatus status = jobSubmitClient.getClusterStatus(true);
  for (String trackerName : status.getActiveTrackerNames()) {
    System.out.println(trackerName);
  }
}
Display the list of active trackers
32,228
/**
 * Displays the names of the blacklisted task trackers in the cluster,
 * one per line.
 */
private void listBlacklistedTrackers() throws IOException {
  ClusterStatus status = jobSubmitClient.getClusterStatus(true);
  for (String trackerName : status.getBlacklistedTrackerNames()) {
    System.out.println(trackerName);
  }
}
Display the list of blacklisted trackers
32,229
/**
 * Displays cluster stats with per-tracker details: totals for running
 * jobs, then for each tracker its active/blacklisted state, last-seen
 * time, slot usage, in-flight task progress and failure count.
 *
 * Note: the progress lists are cleared after each tracker, while the
 * finished-task counters accumulate across trackers as printed.
 *
 * @throws IOException on communication failures with the JobTracker
 */
private void listTrackers() throws IOException {
  ClusterStatus fullStatus = jobSubmitClient.getClusterStatus(true);
  Collection<TaskTrackerStatus> trackers = fullStatus.getTaskTrackersDetails();
  Set<String> activeTrackers =
      new HashSet<String>(fullStatus.getActiveTrackerNames());
  // Progress of running/unassigned tasks; reused per tracker.
  List<Float> mapsProgress = new ArrayList<Float>();
  List<Float> reducesProgress = new ArrayList<Float>();
  int finishedMapsFromRunningJobs = 0;
  int finishedReducesFromRunningJobs = 0;
  System.out.println("Total Map Tasks in Running Jobs: "
      + fullStatus.getTotalMapTasks());
  System.out.println("Total Reduce Tasks in Running Jobs: "
      + fullStatus.getTotalReduceTasks());
  for (TaskTrackerStatus tracker : trackers) {
    System.out.println(tracker.getTrackerName());
    Collection<TaskStatus> tasks =
        fullStatus.getTaskTrackerTasksStatuses(tracker.getTrackerName());
    // Bucket each task by map/reduce and by its run state.
    for (TaskStatus task : tasks) {
      TaskStatus.State state = task.getRunState();
      if (task.getIsMap()
          && (state == TaskStatus.State.RUNNING
              || state == TaskStatus.State.UNASSIGNED)) {
        mapsProgress.add(task.getProgress());
      } else if (!task.getIsMap()
          && (state == TaskStatus.State.RUNNING
              || state == TaskStatus.State.UNASSIGNED)) {
        reducesProgress.add(task.getProgress());
      } else if (task.getIsMap() && state == TaskStatus.State.SUCCEEDED) {
        finishedMapsFromRunningJobs++;
      } else if (!task.getIsMap() && state == TaskStatus.State.SUCCEEDED) {
        finishedReducesFromRunningJobs++;
      }
    }
    if (activeTrackers.contains(tracker.getTrackerName())) {
      System.out.println("\tActive");
    } else {
      System.out.println("\tBlacklisted");
    }
    System.out.println("\tLast Seen: " + tracker.getLastSeen());
    System.out.println("\tMap Tasks Running: " + tracker.countMapTasks()
        + "/" + tracker.getMaxMapSlots());
    System.out.println("\tMap Tasks Progress: " + mapsProgress.toString());
    System.out.println("\tFinished Map Tasks From Running Jobs: "
        + finishedMapsFromRunningJobs);
    System.out.println("\tReduce Tasks Running: " + tracker.countReduceTasks()
        + "/" + tracker.getMaxReduceSlots());
    System.out.println("\tReduce Tasks Progress: "
        + reducesProgress.toString());
    System.out.println("\tTask Tracker Failures: " + tracker.getFailures());
    mapsProgress.clear();
    reducesProgress.clear();
  }
}
Display the stats of the cluster with per tracker details
32,230
/**
 * Returns the file as found on the search path held in {@code pathenv},
 * or null when the path fields are unset or the file is not found.
 *
 * For each path entry: if the entry is a directory, look for
 * {@code filename} inside it; otherwise test the entry itself for being
 * a readable file.
 *
 * @param filename name of the file to locate
 * @return a readable File on the path, or null
 */
public File getAbsolutePath(String filename) {
  if (pathenv == null || pathSep == null || fileSep == null) {
    return null;
  }
  int val = -1;
  // Appending a trailing separator guarantees indexOf finds every entry.
  String classvalue = pathenv + pathSep;
  while (((val = classvalue.indexOf(pathSep)) >= 0)
      && classvalue.length() > 0) {
    String entry = classvalue.substring(0, val).trim();
    File f = new File(entry);
    try {
      if (f.isDirectory()) {
        f = new File(entry + fileSep + filename);
      }
      if (f.isFile() && f.canRead()) {
        return f;
      }
    } catch (Exception exp) {
      // Deliberate best-effort: skip unreadable entries and keep looking.
    }
    classvalue = classvalue.substring(val + 1).trim();
  }
  return null;
}
Returns the full path name of this file if it is listed in the path
32,231
/**
 * Prints all system properties of this process to standard output, one
 * "key = value" pair per line.
 */
private static void printSystemProperties() {
  System.out.println("System properties: ");
  java.util.Properties props = System.getProperties();
  java.util.Enumeration keys = props.keys();
  while (keys.hasMoreElements()) {
    String key = (String) keys.nextElement();
    System.out.println(key + " = " + props.getProperty(key));
  }
}
prints all system properties for this process
32,232
/**
 * Downloads this file to the local file system, showing a busy cursor
 * while the transfer runs inside the workbench progress service.
 *
 * @param file local destination file
 */
public void downloadToLocalFile(final File file)
    throws InvocationTargetException, InterruptedException {
  IRunnableWithProgress downloadTask = new IRunnableWithProgress() {
    public void run(IProgressMonitor monitor) throws InvocationTargetException {
      DFSFile.this.downloadToLocalFile(monitor, file);
    }
  };
  PlatformUI.getWorkbench().getProgressService().busyCursorWhile(downloadTask);
}
Download this file to the local file system . This creates a download status monitor .
32,233
/**
 * Provides a detailed string for this file: the base description plus a
 * human-readable size and the replication factor.
 *
 * @return e.g. "name (1.5 Mb, r3)"
 */
public String toDetailedString() {
  final String[] units = { "b", "Kb", "Mb", "Gb", "Tb" };
  int unit = 0;
  double size = this.length;
  // Stop at the last unit: the previous bound (unit < units.length) let
  // unit reach units.length and threw ArrayIndexOutOfBoundsException for
  // sizes of 1024 Tb or more.
  while ((size >= 1024.0) && (unit < units.length - 1)) {
    unit += 1;
    size /= 1024.0;
  }
  return String.format("%s (%.1f %s, r%d)", super.toString(), size,
      units[unit], this.replication);
}
Provides a detailed string for this file
32,234
/**
 * Downloads the DFS file to a local file, reporting progress through the
 * given monitor; returns early (leaving a partial file) if the monitor
 * is cancelled.
 *
 * @param monitor progress monitor for status and cancellation
 * @param file    local destination file
 * @throws InvocationTargetException wrapping any transfer failure
 */
public void downloadToLocalFile(IProgressMonitor monitor, File file)
    throws InvocationTargetException {
  final int taskSize = 1024;
  monitor.setTaskName("Download file " + this.path);
  BufferedOutputStream ostream = null;
  DataInputStream istream = null;
  try {
    istream = getDFS().open(this.path);
    ostream = new BufferedOutputStream(new FileOutputStream(file));
    int bytes;
    byte[] buffer = new byte[taskSize];
    while ((bytes = istream.read(buffer)) >= 0) {
      if (monitor.isCanceled()) return;
      ostream.write(buffer, 0, bytes);
      monitor.worked(1);
    }
  } catch (Exception e) {
    throw new InvocationTargetException(e);
  } finally {
    if (istream != null) {
      try {
        istream.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
    // Fixed NPE: ostream is null when open()/FileOutputStream failed
    // before it was assigned, and the old code called close() on it
    // unconditionally.
    if (ostream != null) {
      try {
        ostream.close();
      } catch (IOException e) {
        e.printStackTrace();
      }
    }
  }
}
Download the DfsFile to a local file . Use the given monitor to report status of operation .
32,235
/**
 * Uploads a local file to this path on the distributed file system,
 * reporting progress through the given monitor; returns early (leaving a
 * partial remote file) if the monitor is cancelled. Failures are shown
 * in an error dialog rather than thrown.
 *
 * @param monitor progress monitor for status and cancellation
 * @param file    local file to upload
 */
public void upload(IProgressMonitor monitor, File file) {
  final int taskSize = 1024;
  monitor.setTaskName("Upload file " + this.path);
  BufferedInputStream istream = null;
  DataOutputStream ostream = null;
  try {
    istream = new BufferedInputStream(new FileInputStream(file));
    ostream = getDFS().create(this.path);
    int bytes;
    byte[] buffer = new byte[taskSize];
    while ((bytes = istream.read(buffer)) >= 0) {
      if (monitor.isCanceled()) return;
      ostream.write(buffer, 0, bytes);
      monitor.worked(1);
    }
  } catch (Exception e) {
    // Fixed typo in the user-facing message ("uploade" -> "upload").
    ErrorMessageDialog.display(
        String.format("Unable to upload file %s to %s", file, this.path),
        e.getLocalizedMessage());
  } finally {
    try {
      if (istream != null) istream.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
    try {
      if (ostream != null) ostream.close();
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
}
Upload a local file to this file on the distributed file system
32,236
/**
 * Computes status counts for the datanodes in the system: live and dead
 * totals plus excluded and decommission-related breakdowns. The live and
 * dead lists are filled in by the namesystem as a side effect.
 *
 * @param ns   the namesystem queried for node status
 * @param live out-parameter, populated with live nodes
 * @param dead out-parameter, populated with dead nodes
 * @return the aggregated counts
 */
public static DatanodeStatus getDatanodeStats(FSNamesystem ns,
    ArrayList<DatanodeDescriptor> live, ArrayList<DatanodeDescriptor> dead) {
  ns.DFSNodesStatus(live, dead);
  ArrayList<DatanodeDescriptor> decommissioning =
      ns.getDecommissioningNodesList(live);

  int numLive = live.size();
  int numLiveExcluded = 0;
  int numLiveDecommissioningInProgress = decommissioning.size();
  int numLiveDecommissioned = 0;
  for (DatanodeDescriptor node : live) {
    if (node.isDecommissioned()) {
      numLiveDecommissioned++;
    }
    if (ns.inExcludedHostsList(node, null)) {
      numLiveExcluded++;
    }
  }

  int numDead = dead.size();
  int numDeadExcluded = 0;
  int numDeadDecommissioned = 0;
  for (DatanodeDescriptor node : dead) {
    if (node.isDecommissioned()) {
      numDeadDecommissioned++;
    }
    if (ns.inExcludedHostsList(node, null)) {
      numDeadExcluded++;
    }
  }
  // Dead excluded nodes that never finished decommissioning.
  int numDeadDecommissioningNotCompleted =
      numDeadExcluded - numDeadDecommissioned;

  return new DatanodeStatus(numLive, numLiveExcluded,
      numLiveDecommissioningInProgress, numLiveDecommissioned, numDead,
      numDeadExcluded, numDeadDecommissioningNotCompleted,
      numDeadDecommissioned);
}
Get status of the datanodes in the system .
32,237
/**
 * Finishes writing data by appending the checksum bytes to the output
 * stream and flushing. The underlying stream is not closed. Idempotent:
 * calls after the first are no-ops.
 *
 * @throws IOException if writing the checksum fails
 */
public void finish() throws IOException {
  if (finished) {
    return;
  }
  finished = true;
  // Serialize the running checksum into barray, then emit exactly its
  // checksum-size bytes.
  sum.writeValue(barray, 0, false);
  out.write(barray, 0, sum.getChecksumSize());
  out.flush();
}
Finishes writing data to the output stream by writing the checksum bytes to the end . The underlying stream is not closed .
32,238
/**
 * Creates a hard link at {@code dst} pointing to {@code src} by
 * delegating to the wrapped filesystem. Can take place on DFS.
 *
 * @return true if the link was created
 */
public boolean hardLink(Path src, Path dst) throws IOException {
  return fs.hardLink(src, dst);
}
Creates a hard link at Path dst pointing to Path src . Can take place on DFS .
32,239
/**
 * Renames Path src to Path dst via the wrapped filesystem. Can take
 * place on the local fs or a remote DFS.
 *
 * @return true if the rename succeeded
 */
public boolean rename(Path src, Path dst) throws IOException {
  return fs.rename(src, dst);
}
Renames Path src to Path dst . Can take place on local fs or remote DFS .
32,240
/**
 * Copies the given local files to the filesystem under the dst name.
 * delSrc indicates whether the sources should be removed; overwrite
 * controls replacement of existing destinations. Pure delegation to the
 * wrapped filesystem.
 */
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path[] srcs,
    Path dst) throws IOException {
  fs.copyFromLocalFile(delSrc, overwrite, srcs, dst);
}
The src files are on the local disk . Add it to FS at the given dst name . delSrc indicates if the source should be removed
32,241
/**
 * Copies the file at src on this filesystem to the local dst path.
 * delSrc indicates whether the source should be removed. Pure delegation
 * to the wrapped filesystem.
 */
public void copyToLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException {
  fs.copyToLocalFile(delSrc, src, dst);
}
The src file is under FS and the dst is on the local disk . Copy it from FS control to the local dst name . delSrc indicates if the src will be removed or not .
32,242
/**
 * Returns a local file the caller can write output to, given the
 * eventual filesystem target and a local working file. A local
 * filesystem writes directly into the target; a remote one writes into
 * the tmp local area. Pure delegation to the wrapped filesystem.
 */
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
    throws IOException {
  return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
}
Returns a local File that the user can write output to . The caller provides both the eventual FS target name and the local working file . If the FS is local we write directly into the target . If the FS is remote we write into the tmp local area .
32,243
/**
 * Called when writing to the target is done. A local filesystem does
 * nothing (the data is already in place); a remote one copies the
 * contents of tmpLocalFile to the target at fsOutputFile. Pure
 * delegation to the wrapped filesystem.
 */
public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
    throws IOException {
  fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
}
Called when we're all done writing to the target . A local FS will do nothing because we've written to exactly the right place . A remote FS will copy the contents of tmpLocalFile to the correct target at fsOutputFile .
32,244
/**
 * Serializes and anonymizes an EventRecord, then appends it to the local
 * storage. Anonymization failures are printed and the record is still
 * appended (best-effort).
 *
 * @param er the event record to store
 */
public void insert(EventRecord er) {
  SerializedRecord record = new SerializedRecord(er);
  try {
    Anonymizer.anonymize(record);
  } catch (Exception e) {
    e.printStackTrace();
  }
  append(record);
}
Insert an EventRecord to the local storage after it gets serialized and anonymized .
32,245
/**
 * Packs a SerializedRecord into a StringBuffer of key:value pairs, each
 * terminated by FIELD_SEPARATOR; keys are arranged canonically when the
 * record is valid.
 *
 * @param sr the record to pack
 * @return the packed representation
 */
public static StringBuffer pack(SerializedRecord sr) {
  StringBuffer packed = new StringBuffer();
  ArrayList<String> keys = new ArrayList<String>(sr.fields.keySet());
  if (sr.isValid()) {
    SerializedRecord.arrangeKeys(keys);
  }
  for (String key : keys) {
    String value = sr.fields.get(key);
    packed.append(key).append(":").append(value);
    packed.append(FIELD_SEPARATOR);
  }
  return packed;
}
Pack a SerializedRecord into an array of bytes
32,246
/**
 * Uploads the local record store into HDFS (optionally compressing it
 * first), then reopens a fresh local file as a temporary record store.
 *
 * The remote name embeds the (optionally MD5-hashed) canonical hostname
 * and the current timestamp so concurrent uploaders do not collide.
 * Failures in either step are printed and otherwise ignored.
 */
public void upload() {
  try {
    writer.flush();
    if (compress)
      zipCompress(filename);
    String remoteName = "failmon-";
    // Hash the hostname when anonymization is configured.
    if ("true".equalsIgnoreCase(Environment.getProperty("anonymizer.hash.hostnames")))
      remoteName += Anonymizer.getMD5Hash(InetAddress.getLocalHost().getCanonicalHostName()) + "-";
    else
      remoteName += InetAddress.getLocalHost().getCanonicalHostName() + "-";
    remoteName += Calendar.getInstance().getTimeInMillis();
    if (compress)
      copyToHDFS(filename + COMPRESSION_SUFFIX, hdfsDir + "/" + remoteName + COMPRESSION_SUFFIX);
    else
      copyToHDFS(filename, hdfsDir + "/" + remoteName);
  } catch (IOException e) {
    e.printStackTrace();
  }
  // Start a fresh local store for subsequent records.
  try {
    fw.close();
    fw = new FileWriter(filename);
    writer = new BufferedWriter(fw);
  } catch (IOException e) {
    e.printStackTrace();
  }
}
Upload the local file store into HDFS after compressing it . Then a new local file is created as a temporary record store .
32,247
/**
 * Compresses a text file with the ZIP algorithm, writing the result next
 * to the original under the COMPRESSION_SUFFIX name and checksumming the
 * output with CRC32.
 *
 * The streams are now closed in finally blocks; the previous version
 * leaked the reader and the output chain if any write failed.
 *
 * @param filename path of the text file to compress
 * @throws IOException if reading or writing fails
 */
public static void zipCompress(String filename) throws IOException {
  FileOutputStream fos = new FileOutputStream(filename + COMPRESSION_SUFFIX);
  CheckedOutputStream csum = new CheckedOutputStream(fos, new CRC32());
  ZipOutputStream out = new ZipOutputStream(new BufferedOutputStream(csum));
  try {
    out.setComment("Failmon records.");
    BufferedReader in = new BufferedReader(new FileReader(filename));
    try {
      out.putNextEntry(new ZipEntry(new File(filename).getName()));
      int c;
      while ((c = in.read()) != -1)
        out.write(c);
    } finally {
      in.close();
    }
    out.finish();
  } finally {
    // Closes the whole chain: zip -> buffered -> checksum -> file.
    out.close();
  }
}
Compress a text file using the ZIP compressing algorithm .
32,248
public static void copyToHDFS ( String localFile , String hdfsFile ) throws IOException { String hadoopConfPath ; if ( Environment . getProperty ( "hadoop.conf.path" ) == null ) hadoopConfPath = "../../../conf" ; else hadoopConfPath = Environment . getProperty ( "hadoop.conf.path" ) ; Configuration hadoopConf = new Configuration ( ) ; hadoopConf . addResource ( new Path ( hadoopConfPath + "/hadoop-default.xml" ) ) ; hadoopConf . addResource ( new Path ( hadoopConfPath + "/hadoop-site.xml" ) ) ; FileSystem fs = FileSystem . get ( hadoopConf ) ; Path inFile = new Path ( "file://" + localFile ) ; Path outFile = new Path ( hadoopConf . get ( "fs.default.name" ) + hdfsFile ) ; Environment . logInfo ( "Uploading to HDFS (file " + outFile + ") ..." ) ; fs . copyFromLocalFile ( false , inFile , outFile ) ; }
Copy a local file to HDFS
32,249
/**
 * Computes the MD5 digest of the input text and returns it as a hex string.
 * <p>
 * Fix: the original passed {@code text.length()} (a char count) as the byte
 * length to {@code MessageDigest.update}; this now uses the encoded byte
 * array's own length, which is correct regardless of encoding.
 * On digest/encoding failure the zero-filled buffer is hex-encoded, matching
 * the original fallback behavior.
 *
 * @param text the input text to hash
 * @return lowercase hex representation of the MD5 digest
 */
public static String getMD5Hash(String text) {
  byte[] md5hash = new byte[32];
  try {
    MessageDigest md = MessageDigest.getInstance("MD5");
    byte[] encoded = text.getBytes("iso-8859-1");
    md.update(encoded, 0, encoded.length);
    md5hash = md.digest();
  } catch (NoSuchAlgorithmException e) {
    e.printStackTrace();
  } catch (UnsupportedEncodingException e) {
    e.printStackTrace();
  }
  return convertToHex(md5hash);
}
Create the MD5 digest of an input text .
32,250
static void writeInfo ( String parent , HdfsFileStatus i , XMLOutputter doc ) throws IOException { final SimpleDateFormat ldf = df . get ( ) ; doc . startTag ( i . isDir ( ) ? "directory" : "file" ) ; doc . attribute ( "path" , i . getFullPath ( new Path ( parent ) ) . toUri ( ) . getPath ( ) ) ; doc . attribute ( "modified" , ldf . format ( new Date ( i . getModificationTime ( ) ) ) ) ; doc . attribute ( "accesstime" , ldf . format ( new Date ( i . getAccessTime ( ) ) ) ) ; if ( ! i . isDir ( ) ) { doc . attribute ( "size" , String . valueOf ( i . getLen ( ) ) ) ; doc . attribute ( "replication" , String . valueOf ( i . getReplication ( ) ) ) ; doc . attribute ( "blocksize" , String . valueOf ( i . getBlockSize ( ) ) ) ; } doc . attribute ( "permission" , ( i . isDir ( ) ? "d" : "-" ) + i . getPermission ( ) ) ; doc . attribute ( "owner" , i . getOwner ( ) ) ; doc . attribute ( "group" , i . getGroup ( ) ) ; doc . endTag ( ) ; }
Write a node to output . Node information includes path modification permission owner and group . For files it also includes size replication and block - size .
32,251
/**
 * Builds the root attribute map from the request's query string, applying
 * defaults: path "/", exclude "\..*\.crc", filter ".*", non-recursive.
 * Also records the current time and the build version.
 */
protected Map<String, String> buildRoot(HttpServletRequest request, XMLOutputter doc) {
  String path = request.getPathInfo();
  if (path == null) {
    path = "/";
  }
  String exclude = request.getParameter("exclude");
  if (exclude == null) {
    exclude = "\\..*\\.crc";
  }
  String filter = request.getParameter("filter");
  if (filter == null) {
    filter = ".*";
  }
  // "yes".equals(null) is false, so the explicit null check is unnecessary.
  final boolean recur = "yes".equals(request.getParameter("recursive"));
  Map<String, String> root = new HashMap<String, String>();
  root.put("path", path);
  root.put("recursive", recur ? "yes" : "no");
  root.put("filter", filter);
  root.put("exclude", exclude);
  root.put("time", df.get().format(new Date()));
  root.put("version", VersionInfo.getVersion());
  return root;
}
Build a map from the query string setting values and defaults .
32,252
/**
 * Decides whether it is safe to submit another block move for a stripe.
 * Returns true when the observed replica count matches the count expected
 * from the policy's target/meta replication; a mismatch is logged and
 * blocks further moves. A null policy always permits the move.
 */
private boolean shouldSubmitMove(PolicyInfo policy, Map<String, Integer> nodeToNumBlocks, List<BlockInfo> stripeBlocks) {
  if (policy == null) {
    return true;
  }
  int targetRepl = Integer.parseInt(policy.getProperty("targetReplication"));
  int parityRepl = Integer.parseInt(policy.getProperty("metaReplication"));
  Codec codec = Codec.getCodec(policy.getCodecId());
  int numParityBlks = codec.parityLength;
  int numSrcBlks = stripeBlocks.size() - numParityBlks;
  int expectNumReplicas = numSrcBlks * targetRepl + numParityBlks * parityRepl;
  int actualNumReplicas = 0;
  for (int perNode : nodeToNumBlocks.values()) {
    actualNumReplicas += perNode;
  }
  if (actualNumReplicas == expectNumReplicas) {
    return true;
  }
  // Replica counts disagree: log the discrepancy and decline the move.
  String msg = "Expected number of replicas in the stripe: " + expectNumReplicas
      + ", but actual number is: " + actualNumReplicas + ". ";
  if (stripeBlocks.size() > 0) {
    msg += "filePath: " + stripeBlocks.get(0).file.getPath();
  }
  LOG.warn(msg);
  return false;
}
We will not submit more block moves if the namenode hasn't deleted the over-replicated blocks yet.
32,253
/** Adds {@code incr} to the block-fix simulation success counters; rejects negatives. */
protected synchronized void incrNumBlockFixSimulationSuccess(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.blockFixSimulationSuccess.inc(incr);
  numBlockFixSimulationSuccess += incr;
}
Increments the number of block-fix simulation verification successes
32,254
/** Adds {@code incr} to the block-fix simulation failure counters; rejects negatives. */
protected synchronized void incrNumBlockFixSimulationFailures(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.blockFixSimulationFailures.inc(incr);
  numBlockFixSimulationFailures += incr;
}
Increments the number of block-fix simulation verification failures
32,255
/** Adds {@code incr} to the corrupt-file-fix failure counters; rejects negatives. */
protected synchronized void incrFileFixFailures(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.fileFixFailures.inc(incr);
  numFileFixFailures += incr;
}
Increments the number of corrupt file fixing failures .
32,256
/** Adds {@code incr} to the fixed-files counters; rejects negatives. */
protected synchronized void incrFilesFixed(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.filesFixed.inc(incr);
  numFilesFixed += incr;
}
Increments the number of corrupt files that have been fixed by this integrity monitor .
32,257
/** Adds {@code incr} to the remote-rack bytes-read counters for file fixing; rejects negatives. */
protected synchronized void incrFileFixReadBytesRemoteRack(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.numFileFixReadBytesRemoteRack.inc(incr);
  numfileFixBytesReadRemoteRack += incr;
}
Increments the number of bytes read from remote racks during file fix operations of the this integrity monitor
32,258
/** Adds {@code incr} to the decommissioning file-copy failure counters; rejects negatives. */
protected synchronized void incrFileCopyFailures(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.fileCopyFailures.inc(incr);
  numFileCopyFailures += incr;
}
Increments the number of decommissioning file copy failures .
32,259
/** Adds {@code incr} to the copied-files counters; rejects negatives. */
protected synchronized void incrFilesCopied(long incr) {
  if (incr < 0) {
    throw new IllegalArgumentException("Cannot increment by negative value " + incr);
  }
  RaidNodeMetrics metrics = RaidNodeMetrics.getInstance(RaidNodeMetrics.DEFAULT_NAMESPACE_ID);
  metrics.filesCopied.inc(incr);
  numFilesCopied += incr;
}
Increments the number of decommissioning files that have been copied by this integrity monitor .
32,260
synchronized void open ( ) throws IOException { if ( syncer == null ) { syncer = new SyncThread ( ) ; syncThread = new Thread ( syncer ) ; syncThread . start ( ) ; } if ( state != State . BETWEEN_LOG_SEGMENTS ) throw new IOException ( "Bad state: " + state ) ; startLogSegment ( getLastWrittenTxId ( ) + 1 , true ) ; if ( state != State . IN_SEGMENT ) throw new IOException ( "Bad state: " + state ) ; }
Create empty edit log files . Initialize the output stream for logging .
32,261
public void logSyncAll ( ) throws IOException { synchronized ( this ) { TransactionId id = myTransactionId . get ( ) ; id . txid = txid ; } logSync ( ) ; }
Blocks until all ongoing edits have been synced to disk . This differs from logSync in that it waits for edits that have been written by other threads not just edits from the calling thread .
32,262
/**
 * Triggers an out-of-band sync when either the number of buffered (unsynced)
 * transactions exceeds the configured limit, or the output stream itself
 * requests a forced sync. The actual logSync() runs outside the lock.
 */
public void logSyncIfNeeded() {
  boolean syncRequired = false;
  synchronized (this) {
    if (txid > synctxid + maxBufferedTransactions) {
      FSNamesystem.LOG.info("Out of band log sync triggered  because there are "
          + (txid - synctxid)
          + " buffered transactions which  is more than the configured limit of "
          + maxBufferedTransactions);
      syncRequired = true;
    }
    if (shouldForceSync()) {
      FSNamesystem.LOG.info("Log sync triggered by the output stream");
      syncRequired = true;
    }
  }
  if (syncRequired) {
    logSync();
  }
}
If there are too many transactions yet to be synced, sync them; otherwise the in-memory buffer holding the transactions can grow very large. This can happen when a large number of listStatus calls update the access time of files.
32,263
/**
 * Syncs all modifications done by this thread to persistent storage.
 * <p>
 * At most one thread syncs at a time; others either wait (doWait == true)
 * or register a delayed RPC response keyed by their txid (doWait == false)
 * and return immediately. A thread whose txid was already covered by a
 * concurrent sync counts as a batched transaction and returns without
 * syncing. On journal failure the process is terminated via runtime.exit.
 *
 * @param doWait if true, block until this thread's edits are durable;
 *               if false, delay the RPC response instead of blocking
 */
public void logSync(boolean doWait) {
  long syncStart = 0;
  boolean thisThreadSuccess = false;
  boolean thisThreadSyncing = false;
  EditLogOutputStream logStream = null;
  try {
    synchronized (this) {
      // Consume this thread's recorded txid; -1 means "nothing recorded",
      // in which case sync up to the globally latest txid.
      long mytxid = myTransactionId.get().txid;
      myTransactionId.get().txid = -1L;
      if (mytxid == -1) {
        mytxid = txid;
      }
      printStatistics(false);
      // Another thread is already syncing past our txid.
      while (mytxid > synctxid && isSyncRunning) {
        if (!doWait) {
          // Non-blocking path: park the RPC response until the covering
          // sync completes (see endDelay), then return immediately.
          long delayedId = Server.delayResponse();
          List<Long> responses = delayedSyncs.get(mytxid);
          if (responses == null) {
            responses = new LinkedList<Long>();
            delayedSyncs.put(mytxid, responses);
          }
          responses.add(delayedId);
          return;
        }
        try {
          wait(1000);
        } catch (InterruptedException ie) {
          // loop re-checks the condition; interrupt intentionally ignored
        }
      }
      if (mytxid <= synctxid) {
        // A concurrent sync already covered our edits — count the batch.
        numTransactionsBatchedInSync++;
        if (metrics != null)
          metrics.transactionsBatchedInSync.inc();
        return;
      }
      // Become the syncing thread for everything up to the current txid.
      syncStart = txid;
      isSyncRunning = true;
      thisThreadSyncing = true;
      try {
        if (journalSet.isEmpty()) {
          throw new IOException("No journals available to flush, journalset is empty");
        }
        if (editLogStream == null) {
          throw new IOException("No journals available to flush, editlogstream is null");
        }
        // Swap the double buffer so writers can continue while we flush.
        editLogStream.setReadyToFlush();
      } catch (IOException e) {
        // Losing all journals is unrecoverable: abort the process.
        LOG.fatal("Could not sync enough journals to persistent storage. " + "Unsynced transactions: " + (txid - synctxid), new Exception(e));
        runtime.exit(1);
      }
      logStream = editLogStream;
    }
    // Perform the actual flush outside the lock.
    sync(logStream, syncStart);
    thisThreadSuccess = true;
  } finally {
    synchronized (this) {
      if (thisThreadSyncing) {
        if (thisThreadSuccess) {
          synctxid = syncStart;
        }
        isSyncRunning = false;
      }
      this.notifyAll();
    }
  }
  // Release any RPC responses that were delayed up to syncStart.
  endDelay(syncStart);
}
Sync all modifications done by this thread .
32,264
public void logOpenFile ( String path , INodeFileUnderConstruction newNode ) throws IOException { AddOp op = AddOp . getInstance ( ) ; op . set ( newNode . getId ( ) , path , newNode . getReplication ( ) , newNode . getModificationTime ( ) , newNode . getAccessTime ( ) , newNode . getPreferredBlockSize ( ) , newNode . getBlocks ( ) , newNode . getPermissionStatus ( ) , newNode . getClientName ( ) , newNode . getClientMachine ( ) ) ; logEdit ( op ) ; }
Add open lease record to edit log . Records the block locations of the last block .
32,265
/** Records a close-lease operation; client name/machine are null once the lease is released. */
public void logCloseFile(String path, INodeFile newNode) {
  CloseOp closeOp = CloseOp.getInstance();
  closeOp.set(newNode.getId(), path, newNode.getReplication(),
      newNode.getModificationTime(), newNode.getAccessTime(),
      newNode.getPreferredBlockSize(), newNode.getBlocks(),
      newNode.getPermissionStatus(), null, null);
  logEdit(closeOp);
}
Add close lease record to edit log .
32,266
public void logAppendFile ( String path , INodeFileUnderConstruction newNode ) throws IOException { AppendOp op = AppendOp . getInstance ( ) ; op . set ( path , newNode . getBlocks ( ) , newNode . getClientName ( ) , newNode . getClientMachine ( ) ) ; logEdit ( op ) ; }
Add append file record to the edit log .
32,267
/** Records a create-directory operation in the edit log. */
public void logMkDir(String path, INode newNode) {
  MkdirOp mkdirOp = MkdirOp.getInstance();
  mkdirOp.set(newNode.getId(), path, newNode.getModificationTime(),
      newNode.getPermissionStatus());
  logEdit(mkdirOp);
}
Add create directory record to edit log
32,268
/** Records a hardlink operation (src -> dst) in the edit log. */
public void logHardLink(String src, String dst, long timestamp) {
  HardLinkOp hardLinkOp = HardLinkOp.getInstance();
  hardLinkOp.set(src, dst, timestamp);
  logEdit(hardLinkOp);
}
Add hardlink record to edit log
32,269
/** Records a rename operation (src -> dst) in the edit log. */
public void logRename(String src, String dst, long timestamp) {
  RenameOp renameOp = RenameOp.getInstance();
  renameOp.set(src, dst, timestamp);
  logEdit(renameOp);
}
Add rename record to edit log
32,270
/** Records a set-replication operation in the edit log. */
public void logSetReplication(String src, short replication) {
  SetReplicationOp replOp = SetReplicationOp.getInstance();
  replOp.set(src, replication);
  logEdit(replOp);
}
Add set replication record to edit log
32,271
/** Records a set-quota operation (namespace and diskspace quotas) in the edit log. */
public void logSetQuota(String src, long nsQuota, long dsQuota) {
  SetQuotaOp quotaOp = SetQuotaOp.getInstance();
  quotaOp.set(src, nsQuota, dsQuota);
  logEdit(quotaOp);
}
Add set namespace quota record to edit log
32,272
/** Records a set-permissions operation in the edit log. */
public void logSetPermissions(String src, FsPermission permissions) {
  SetPermissionsOp permOp = SetPermissionsOp.getInstance();
  permOp.set(src, permissions);
  logEdit(permOp);
}
Add set permissions record to edit log
32,273
/** Records a set-owner operation (user and group) in the edit log. */
public void logSetOwner(String src, String username, String groupname) {
  SetOwnerOp ownerOp = SetOwnerOp.getInstance();
  ownerOp.set(src, username, groupname);
  logEdit(ownerOp);
}
Add set owner record to edit log
32,274
/** Records a delete-file operation in the edit log. */
public void logDelete(String src, long timestamp) {
  DeleteOp deleteOp = DeleteOp.getInstance();
  deleteOp.set(src, timestamp);
  logEdit(deleteOp);
}
Add delete file record to edit log
32,275
/** Records a generation-stamp update in the edit log. */
public void logGenerationStamp(long genstamp) {
  SetGenstampOp genstampOp = SetGenstampOp.getInstance();
  genstampOp.set(genstamp);
  logEdit(genstampOp);
}
Add generation stamp record to edit log
32,276
/** Records a modification/access time update in the edit log. */
public void logTimes(String src, long mtime, long atime) {
  TimesOp timesOp = TimesOp.getInstance();
  timesOp.set(src, mtime, atime);
  logEdit(timesOp);
}
Add access time record to edit log
32,277
synchronized long rollEditLog ( ) throws IOException { LOG . info ( "Rolling edit logs." ) ; long start = System . nanoTime ( ) ; endCurrentLogSegment ( true ) ; long nextTxId = getLastWrittenTxId ( ) + 1 ; startLogSegment ( nextTxId , true ) ; assert curSegmentTxId == nextTxId ; long rollTime = DFSUtil . getElapsedTimeMicroSeconds ( start ) ; if ( metrics != null ) { metrics . rollEditLogTime . inc ( rollTime ) ; metrics . tsLastEditsRoll . set ( System . currentTimeMillis ( ) ) ; } return nextTxId ; }
Finalizes the current edit log and opens a new log segment .
32,278
synchronized void startLogSegment ( final long segmentTxId , boolean writeHeaderTxn ) throws IOException { LOG . info ( "Starting log segment at " + segmentTxId ) ; if ( segmentTxId < 0 ) { throw new IOException ( "Bad txid: " + segmentTxId ) ; } if ( state != State . BETWEEN_LOG_SEGMENTS ) { throw new IOException ( "Bad state: " + state ) ; } if ( segmentTxId <= curSegmentTxId ) { throw new IOException ( "Cannot start writing to log segment " + segmentTxId + " when previous log segment started at " + curSegmentTxId ) ; } if ( segmentTxId != txid + 1 ) { throw new IOException ( "Cannot start log segment at txid " + segmentTxId + " when next expected " + ( txid + 1 ) ) ; } numTransactions = totalTimeTransactions = numTransactionsBatchedInSync = 0 ; storage . attemptRestoreRemovedStorage ( ) ; try { editLogStream = journalSet . startLogSegment ( segmentTxId ) ; } catch ( IOException ex ) { throw new IOException ( "Unable to start log segment " + segmentTxId + ": no journals successfully started." ) ; } curSegmentTxId = segmentTxId ; state = State . IN_SEGMENT ; if ( writeHeaderTxn ) { logEdit ( LogSegmentOp . getInstance ( FSEditLogOpCodes . OP_START_LOG_SEGMENT ) ) ; logSync ( ) ; } journalSet . updateJournalMetrics ( ) ; if ( timeoutRollEdits > 0 ) { FSNamesystem fsn = this . journalSet . getImage ( ) . getFSNamesystem ( ) ; if ( fsn != null ) { AutomaticEditsRoller aer = fsn . automaticEditsRoller ; if ( aer != null ) { aer . setNextRollTime ( System . currentTimeMillis ( ) + timeoutRollEdits ) ; } else { LOG . warn ( "Automatic edits roll is enabled but the roller thread " + "is not enabled. Should only happen in unit tests." ) ; } } else { LOG . warn ( "FSNamesystem is NULL in FSEditLog." ) ; } } }
Start writing to the log segment with the given txid . Transitions from BETWEEN_LOG_SEGMENTS state to IN_LOG_SEGMENT state .
32,279
synchronized void endCurrentLogSegment ( boolean writeEndTxn ) throws IOException { LOG . info ( "Ending log segment " + curSegmentTxId ) ; if ( state != State . IN_SEGMENT ) { throw new IllegalStateException ( "Bad state: " + state ) ; } waitForSyncToFinish ( ) ; if ( writeEndTxn ) { logEdit ( LogSegmentOp . getInstance ( FSEditLogOpCodes . OP_END_LOG_SEGMENT ) ) ; } logSyncAll ( ) ; printStatistics ( true ) ; final long lastTxId = getLastWrittenTxId ( ) ; try { journalSet . finalizeLogSegment ( curSegmentTxId , lastTxId ) ; editLogStream = null ; } catch ( IOException e ) { FSNamesystem . LOG . info ( "Cannot finalize log segment: " + e . toString ( ) ) ; } state = State . BETWEEN_LOG_SEGMENTS ; }
Finalize the current log segment . Transitions from IN_SEGMENT state to BETWEEN_LOG_SEGMENTS state .
32,280
/**
 * Purges finalized edit log segments older than the given txid.
 * Purging is best-effort: a failure is logged rather than propagated.
 * <p>
 * Fix: the original swallowed the IOException in an empty catch block,
 * hiding purge failures from operators; the failure is now logged.
 *
 * @param minTxIdToKeep the lowest transaction id that must be retained
 */
public void purgeLogsOlderThan(final long minTxIdToKeep) {
  synchronized (this) {
    assert curSegmentTxId == HdfsConstants.INVALID_TXID
        || minTxIdToKeep <= curSegmentTxId
        : "cannot purge logs older than txid " + minTxIdToKeep
          + " when current segment starts at " + curSegmentTxId;
    try {
      journalSet.purgeLogsOlderThan(minTxIdToKeep);
    } catch (IOException ex) {
      LOG.warn("Failed to purge logs older than txid " + minTxIdToKeep, ex);
    }
  }
}
Archive any log files that are older than the given txid .
32,281
/**
 * Selects the input streams to load edits from, verifying that all segments
 * are finalized and that the selected streams cover [fromTxId, toAtLeastTxId]
 * without gaps. On a gap, every opened stream is closed before rethrowing.
 *
 * @return true if the redundancy requirement was violated
 */
public synchronized boolean selectInputStreams(Collection<EditLogInputStream> streams, long fromTxId, long toAtLeastTxId, int minRedundancy) throws IOException {
  if (journalSet.hasUnfinalizedSegments(fromTxId)) {
    LOG.fatal("All streams should be finalized");
    throw new IOException("All streams should be finalized at startup");
  }
  final boolean redundancyViolated =
      journalSet.selectInputStreams(streams, fromTxId, false, false, minRedundancy);
  try {
    checkForGaps(streams, fromTxId, toAtLeastTxId, true);
  } catch (IOException e) {
    // Don't leak opened streams when coverage validation fails.
    closeAllStreams(streams);
    throw e;
  }
  return redundancyViolated;
}
Select a list of input streams to load .
32,282
/** Closes every stream in the collection, ignoring individual close failures. */
static void closeAllStreams(Iterable<EditLogInputStream> streams) {
  for (EditLogInputStream stream : streams) {
    IOUtils.closeStream(stream);
  }
}
Close all the streams in a collection
32,283
/**
 * Resolves the JournalManager implementation class configured for a URI
 * scheme via "dfs.name.edits.journal-plugin.&lt;scheme&gt;".
 * Throws IllegalArgumentException when the key is invalid or unset.
 */
static Class<? extends JournalManager> getJournalClass(Configuration conf, String uriScheme) {
  final String key = "dfs.name.edits.journal-plugin." + uriScheme;
  Class<? extends JournalManager> clazz;
  try {
    clazz = conf.getClass(key, null, JournalManager.class);
  } catch (RuntimeException re) {
    throw new IllegalArgumentException("Invalid class specified for " + uriScheme, re);
  }
  if (clazz == null) {
    LOG.warn("No class configured for " + uriScheme + ", " + key + " is empty");
    throw new IllegalArgumentException("No class configured for " + uriScheme);
  }
  return clazz;
}
Retrieve the implementation class for a Journal scheme .
32,284
/**
 * Instantiates the configured JournalManager for the given URI via its
 * (Configuration, URI, NamespaceInfo, NameNodeMetrics) constructor.
 * Any construction failure is wrapped in IllegalArgumentException.
 */
public static JournalManager createJournal(Configuration conf, URI uri, NamespaceInfo nsInfo, NameNodeMetrics metrics) {
  Class<? extends JournalManager> clazz = getJournalClass(conf, uri.getScheme());
  try {
    Constructor<? extends JournalManager> ctor = clazz.getConstructor(
        Configuration.class, URI.class, NamespaceInfo.class, NameNodeMetrics.class);
    return ctor.newInstance(conf, uri, nsInfo, metrics);
  } catch (Exception e) {
    throw new IllegalArgumentException("Unable to construct journal, " + uri, e);
  }
}
Construct a custom journal manager . The class to construct is taken from the configuration .
32,285
public float getProgress ( ) throws IOException { long subprogress = 0 ; if ( null != curReader ) { subprogress = ( long ) ( curReader . getProgress ( ) * split . getLength ( idx - 1 ) ) ; } return Math . min ( 1.0f , ( progress + subprogress ) / ( float ) ( split . getLength ( ) ) ) ; }
return progress based on the amount of data processed so far .
32,286
protected boolean initNextRecordReader ( ) throws IOException { if ( curReader != null ) { curReader . close ( ) ; curReader = null ; if ( idx > 0 ) { progress += split . getLength ( idx - 1 ) ; } } if ( idx == split . getNumPaths ( ) ) { return false ; } try { curReader = rrConstructor . newInstance ( new Object [ ] { split , jc , reporter , Integer . valueOf ( idx ) } ) ; jc . set ( "map.input.file" , split . getPath ( idx ) . toString ( ) ) ; jc . setLong ( "map.input.start" , split . getOffset ( idx ) ) ; jc . setLong ( "map.input.length" , split . getLength ( idx ) ) ; } catch ( Exception e ) { throw new RuntimeException ( e ) ; } idx ++ ; return true ; }
Get the record reader for the next chunk in this CombineFileSplit .
32,287
public void setupJob ( JobContext context ) throws IOException { if ( outputPath != null ) { Path tmpDir = new Path ( outputPath , FileOutputCommitter . TEMP_DIR_NAME ) ; FileSystem fileSys = tmpDir . getFileSystem ( context . getConfiguration ( ) ) ; if ( ! fileSys . mkdirs ( tmpDir ) ) { LOG . error ( "Mkdirs failed to create " + tmpDir . toString ( ) ) ; } } }
Create the temporary directory that is the root of all of the task work directories .
32,288
public void commitJob ( JobContext context ) throws IOException { cleanupJob ( context ) ; if ( shouldMarkOutputDir ( context . getConfiguration ( ) ) ) { markOutputDirSuccessful ( context ) ; } }
Delete the temporary directory including all of the work directories . This is called for all jobs whose final run state is SUCCEEDED
32,289
public void abortJob ( JobContext context , JobStatus . State state ) throws IOException { cleanupJob ( context ) ; }
Delete the temporary directory including all of the work directories .
32,290
public void commitTask ( TaskAttemptContext context ) throws IOException { TaskAttemptID attemptId = context . getTaskAttemptID ( ) ; if ( workPath != null ) { context . progress ( ) ; if ( outputFileSystem . exists ( workPath ) ) { moveTaskOutputs ( context , outputFileSystem , outputPath , workPath ) ; if ( ! outputFileSystem . delete ( workPath , true ) ) { LOG . warn ( "Failed to delete the temporary output" + " directory of task: " + attemptId + " - " + workPath ) ; } LOG . info ( "Saved output of task '" + attemptId + "' to " + outputPath ) ; } } }
Move the files from the work directory to the job output directory
32,291
/**
 * Recursively moves everything under the task's work directory into the job
 * output directory. Files are renamed (with one delete-and-retry if the
 * destination exists); directories are recreated and their children recursed.
 */
private void moveTaskOutputs(TaskAttemptContext context, FileSystem fs, Path jobOutputDir, Path taskOutput) throws IOException {
  final TaskAttemptID attemptId = context.getTaskAttemptID();
  context.progress();
  if (fs.isFile(taskOutput)) {
    Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath);
    if (!fs.rename(taskOutput, finalOutputPath)) {
      // Rename can fail if a stale destination exists: delete it and retry once.
      if (!fs.delete(finalOutputPath, true)) {
        throw new IOException("Failed to delete earlier output of task: " + attemptId);
      }
      if (!fs.rename(taskOutput, finalOutputPath)) {
        throw new IOException("Failed to save output of task: " + attemptId);
      }
    }
    LOG.debug("Moved " + taskOutput + " to " + finalOutputPath);
  } else if (fs.getFileStatus(taskOutput).isDir()) {
    FileStatus[] children = fs.listStatus(taskOutput);
    Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath);
    fs.mkdirs(finalOutputPath);
    if (children != null) {
      for (FileStatus child : children) {
        moveTaskOutputs(context, fs, jobOutputDir, child.getPath());
      }
    }
  }
}
Move all of the files from the work directory to the final output
32,292
public void abortTask ( TaskAttemptContext context ) throws IOException { try { if ( workPath != null ) { context . progress ( ) ; if ( ! outputFileSystem . delete ( workPath , true ) ) { LOG . warn ( "Deleting output in " + workPath + " returns false" ) ; } } } catch ( IOException ie ) { LOG . warn ( "Error discarding output in " + workPath , ie ) ; throw ie ; } }
Delete the work directory
32,293
private Path getFinalPath ( Path jobOutputDir , Path taskOutput , Path taskOutputPath ) throws IOException { URI taskOutputUri = taskOutput . toUri ( ) ; URI relativePath = taskOutputPath . toUri ( ) . relativize ( taskOutputUri ) ; if ( taskOutputUri == relativePath ) { throw new IOException ( "Can not get the relative path: base = " + taskOutputPath + " child = " + taskOutput ) ; } if ( relativePath . getPath ( ) . length ( ) > 0 ) { return new Path ( jobOutputDir , relativePath . getPath ( ) ) ; } else { return jobOutputDir ; } }
Find the final name of a given output file given the job output directory and the work directory .
32,294
public boolean needsTaskCommit ( TaskAttemptContext context ) throws IOException { return workPath != null && outputFileSystem . exists ( workPath ) ; }
Did this task write any files in the work directory?
32,295
private void readNameToNode ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "nameToNode" ) ; coronaSerializer . readStartObjectToken ( "nameToNode" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String nodeName = coronaSerializer . getFieldName ( ) ; ClusterNode clusterNode = new ClusterNode ( coronaSerializer ) ; if ( ! nameToNode . containsKey ( nodeName ) ) { nameToNode . put ( nodeName , clusterNode ) ; } current = coronaSerializer . nextToken ( ) ; } }
Reads the nameToNode map from the JSON stream
32,296
private void readHostsToSessions ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "hostsToSessions" ) ; coronaSerializer . readStartObjectToken ( "hostsToSessions" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String host = coronaSerializer . getFieldName ( ) ; Set < String > sessionsSet = coronaSerializer . readValueAs ( Set . class ) ; hostsToSessions . put ( nameToNode . get ( host ) , sessionsSet ) ; current = coronaSerializer . nextToken ( ) ; } }
Reads the hostsToSessions map from the JSON stream
32,297
private void readNameToApps ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "nameToApps" ) ; coronaSerializer . readStartObjectToken ( "nameToApps" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String nodeName = coronaSerializer . getFieldName ( ) ; coronaSerializer . readStartObjectToken ( nodeName ) ; Map < String , String > appMap = coronaSerializer . readValueAs ( Map . class ) ; Map < ResourceType , String > appsOnNode = new HashMap < ResourceType , String > ( ) ; for ( Map . Entry < String , String > entry : appMap . entrySet ( ) ) { appsOnNode . put ( ResourceType . valueOf ( entry . getKey ( ) ) , entry . getValue ( ) ) ; } nameToApps . put ( nodeName , appsOnNode ) ; current = coronaSerializer . nextToken ( ) ; } }
Reads the nameToApps map from the JSON stream
32,298
/** Returns whether any node can currently run resources of the given type. */
public boolean existRunnableNodes(ResourceType type) {
  return typeToIndices.get(type).existRunnableNodes();
}
See if there are any runnable nodes of a given type
32,299
/**
 * Finds the best runnable node for the given host subject to the locality
 * constraint. A null host means no locality preference, in which case any
 * runnable node of the type (outside the excluded set) is returned.
 */
public ClusterNode getRunnableNode(String host, LocalityLevel maxLevel, ResourceType type, Set<String> excluded) {
  if (host == null) {
    // No locality preference: any runnable node of this type will do.
    return typeToIndices.get(type).getRunnableNodeForAny(excluded);
  }
  RequestedNode requested = resolve(host, type);
  return getRunnableNode(requested, maxLevel, type, excluded);
}
Find the best matching node for this host subject to the maxLevel constraint