idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,200
public void setUrl ( String url ) throws IOException { if ( failException != null ) { throw failException ; } sessionInfo . url = url ; SessionInfo newInfo = new SessionInfo ( sessionInfo ) ; cmNotifier . addCall ( new ClusterManagerService . sessionUpdateInfo_args ( sessionId , newInfo ) ) ; }
Set the URL for this session in the ClusterManager
32,201
public void stopRemoteSession ( String remoteId ) { cmNotifier . addCall ( new ClusterManagerService . sessionEnd_args ( remoteId , SessionStatus . TIMED_OUT ) ) ; }
Stops session acquired by remote JT
32,202
public void stop ( SessionStatus status , List < ResourceType > resourceTypes , List < NodeUsageReport > reportList ) { LOG . info ( "Stopping session driver" ) ; running = false ; cmNotifier . clearCalls ( ) ; if ( reportList != null && ! reportList . isEmpty ( ) ) { cmNotifier . addCall ( new ClusterManagerService . ...
Stop the SessionDriver . This sends the message to the ClusterManager indicating that the session has ended . If reportList is not null or empty it will send the report prior to closing the session .
32,203
public void join ( ) throws InterruptedException { serverThread . join ( ) ; long start = System . currentTimeMillis ( ) ; cmNotifier . join ( SESSION_DRIVER_WAIT_INTERVAL ) ; long end = System . currentTimeMillis ( ) ; if ( end - start >= SESSION_DRIVER_WAIT_INTERVAL ) { LOG . warn ( "Taking more than " + SESSION_DRIV...
Join the underlying threads of SessionDriver
32,204
public void requestResources ( List < ResourceRequest > wanted ) throws IOException { if ( failException != null ) { throw failException ; } cmNotifier . addCall ( new ClusterManagerService . requestResource_args ( sessionId , wanted ) ) ; }
Request needed resources from the ClusterManager
32,205
public void releaseResources ( List < ResourceRequest > released ) throws IOException { if ( failException != null ) { throw failException ; } List < Integer > releasedIds = new ArrayList < Integer > ( ) ; for ( ResourceRequest req : released ) { releasedIds . add ( req . getId ( ) ) ; } cmNotifier . addCall ( new Clus...
Release the resources that are no longer used
32,206
public static Object createInstance ( String className ) { Object retv = null ; try { ClassLoader classLoader = Thread . currentThread ( ) . getContextClassLoader ( ) ; Class < ? > theFilterClass = Class . forName ( className , true , classLoader ) ; Constructor meth = theFilterClass . getDeclaredConstructor ( argArray...
Create an instance of the given class
32,207
public static void set ( long [ ] bits , int pos ) { int offset = pos >> LONG_SHIFT ; if ( offset >= bits . length ) throw new IndexOutOfBoundsException ( ) ; bits [ offset ] |= 1L << pos ; }
Set the bit for the given position to true .
32,208
public static void clear ( long [ ] bits , int pos ) { int offset = pos >> LONG_SHIFT ; if ( offset >= bits . length ) throw new IndexOutOfBoundsException ( ) ; bits [ offset ] &= ~ ( 1L << pos ) ; }
Set the bit for the given position to false .
32,209
public static boolean get ( long [ ] bits , int pos ) { int offset = pos >> LONG_SHIFT ; if ( offset >= bits . length ) return false ; return ( bits [ offset ] & ( 1L << pos ) ) != 0 ; }
Gets the bit for the given position .
32,210
public static int cardinality ( long [ ] bits ) { int card = 0 ; for ( int i = bits . length - 1 ; i >= 0 ; i -- ) { long a = bits [ i ] ; if ( a == 0 ) continue ; if ( a == - 1 ) { card += 64 ; continue ; } a = ( ( a >> 1 ) & 0x5555555555555555L ) + ( a & 0x5555555555555555L ) ; a = ( ( a >> 2 ) & 0x3333333333333333L ...
Returns the number of bits set to 1 .
32,211
void checkState ( ) throws IOException { int majority = getMajoritySize ( ) ; int numDisabled = 0 ; for ( HttpImageUploadChannel ch : uploadChannels ) { numDisabled += ch . isDisabled ( ) ? 1 : 0 ; } if ( numDisabled >= majority ) { Map < HttpImageUploadChannel , Void > successes = new HashMap < HttpImageUploadChannel ...
At each operation this function is used to ensure that we still have a majority of successful channels to which we can write .
32,212
private void flushBuffer ( boolean close ) { for ( HttpImageUploadChannel ch : uploadChannels ) { ch . send ( buffer ) ; } if ( ! close ) { buffer = new ByteArrayOutputStream ( ( int ) ( 1.2 * flushSize ) ) ; } }
Flushes the buffer to the upload channels and allocates a new buffer .
32,213
public synchronized FileSystem getFs ( ) throws IOException { if ( this . fs == null ) { Path sysDir = getSystemDir ( ) ; this . fs = sysDir . getFileSystem ( getConf ( ) ) ; } return fs ; }
Get a filesystem handle . We need this to prepare jobs for submission to the MapReduce system .
32,214
private Path copyRemoteFiles ( FileSystem jtFs , Path parentDir , Path originalPath , JobConf job , short replication , String md5 ) throws IOException { FileSystem remoteFs = null ; remoteFs = originalPath . getFileSystem ( job ) ; if ( compareFs ( remoteFs , jtFs ) ) { return originalPath ; } if ( md5 != null ) { Pat...
Copies a remote file to the JobTracker filesystem if needed and returns the path it was copied to .
32,215
private void symLinkAndConfigureFiles ( JobConf job ) throws IOException { if ( ! ( job . getBoolean ( "mapred.used.genericoptionsparser" , false ) ) ) { LOG . warn ( "Use GenericOptionsParser for parsing the arguments. " + "Applications should implement Tool for the same." ) ; } String files = job . get ( "tmpfiles" )...
Create symlinks for the files needed for the jobs in current directory
32,216
private void configureUserName ( JobConf job ) throws IOException { UnixUserGroupInformation ugi = getUGI ( job ) ; job . setUser ( ugi . getUserName ( ) ) ; if ( ugi . getGroupNames ( ) != null && ugi . getGroupNames ( ) . length > 0 ) { job . set ( "group.name" , ugi . getGroupNames ( ) [ 0 ] ) ; } if ( job . getWork...
set this user s id in job configuration so later job files can be accessed using this user s id
32,217
public RunningJob submitJob ( String jobFile ) throws FileNotFoundException , InvalidJobConfException , IOException { JobConf job = new JobConf ( jobFile ) ; return submitJob ( job ) ; }
Submit a job to the MR system .
32,218
public RunningJob submitJobInternal ( JobConf job ) throws FileNotFoundException , ClassNotFoundException , InterruptedException , IOException { boolean shared = job . getBoolean ( "mapred.cache.shared.enabled" , false ) ; JobID jobId = jobSubmitClient . getNewJobId ( ) ; Path submitJobDir = new Path ( getSystemDir ( )...
Internal method for submitting jobs to the system .
32,219
private void validateNumberOfTasks ( int splits , int reduceTasks , JobConf conf ) throws IOException { int maxTasks = conf . getInt ( "mapred.jobtracker.maxtasks.per.job" , - 1 ) ; int totalTasks = splits + reduceTasks ; if ( ( maxTasks != - 1 ) && ( totalTasks > maxTasks ) ) { throw new IOException ( "The number of t...
JobTracker applies this limit against the sum of mappers and reducers .
32,220
static RawSplit [ ] readSplitFile ( DataInput in ) throws IOException { byte [ ] header = new byte [ SPLIT_FILE_HEADER . length ] ; in . readFully ( header ) ; if ( ! Arrays . equals ( SPLIT_FILE_HEADER , header ) ) { throw new IOException ( "Invalid header on split file" ) ; } int vers = WritableUtils . readVInt ( in ...
Read a splits file into a list of raw splits
32,221
public void displayTasks ( JobID jobId , String type , String state ) throws IOException { TaskReport [ ] reports = new TaskReport [ 0 ] ; if ( type . equals ( "map" ) ) { reports = getMapTaskReports ( jobId ) ; } else if ( type . equals ( "reduce" ) ) { reports = getReduceTaskReports ( jobId ) ; } else if ( type . equ...
Display the information about a job s tasks of a particular type and in a particular state
32,222
public static RunningJob runJob ( JobConf job ) throws IOException { JobClient jc = new JobClient ( job ) ; RunningJob rj = jc . submitJob ( job ) ; try { if ( ! jc . monitorAndPrintJob ( job , rj ) ) { throw new IOException ( "Job failed!" ) ; } } catch ( InterruptedException ie ) { Thread . currentThread ( ) . interr...
Utility that submits a job then polls for progress until the job is complete .
32,223
public boolean monitorAndPrintJob ( JobConf conf , RunningJob job ) throws IOException , InterruptedException { String lastReport = null ; TaskStatusFilter filter ; filter = getTaskOutputFilter ( conf ) ; JobID jobId = job . getID ( ) ; LOG . info ( "Running job: " + jobId ) ; int eventCounter = 0 ; boolean profiling =...
Monitor a job and print status in real - time as progress is made and tasks fail .
32,224
private void listEvents ( JobID jobId , int fromEventId , int numEvents ) throws IOException { TaskCompletionEvent [ ] events = jobSubmitClient . getTaskCompletionEvents ( jobId , fromEventId , numEvents ) ; System . out . println ( "Task completion events for " + jobId ) ; System . out . println ( "Number of events (f...
List the events for the given job
32,225
private void listJobs ( ) throws IOException { JobStatus [ ] jobs = jobsToComplete ( ) ; if ( jobs == null ) jobs = new JobStatus [ 0 ] ; System . out . printf ( "%d jobs currently running\n" , jobs . length ) ; displayJobList ( jobs ) ; }
Dump a list of currently running jobs
32,226
private void listAllJobs ( ) throws IOException { JobStatus [ ] jobs = getAllJobs ( ) ; if ( jobs == null ) jobs = new JobStatus [ 0 ] ; System . out . printf ( "%d jobs submitted\n" , jobs . length ) ; System . out . printf ( "States are:\n\tRunning : 1\tSucceded : 2" + "\tFailed : 3\tPrep : 4\n" ) ; displayJobList ( ...
Dump a list of all jobs submitted .
32,227
private void listActiveTrackers ( ) throws IOException { ClusterStatus c = jobSubmitClient . getClusterStatus ( true ) ; Collection < String > trackers = c . getActiveTrackerNames ( ) ; for ( String trackerName : trackers ) { System . out . println ( trackerName ) ; } }
Display the list of active trackers
32,228
private void listBlacklistedTrackers ( ) throws IOException { ClusterStatus c = jobSubmitClient . getClusterStatus ( true ) ; Collection < String > trackers = c . getBlacklistedTrackerNames ( ) ; for ( String trackerName : trackers ) { System . out . println ( trackerName ) ; } }
Display the list of blacklisted trackers
32,229
private void listTrackers ( ) throws IOException { ClusterStatus fullStatus = jobSubmitClient . getClusterStatus ( true ) ; Collection < TaskTrackerStatus > trackers = fullStatus . getTaskTrackersDetails ( ) ; Set < String > activeTrackers = new HashSet < String > ( fullStatus . getActiveTrackerNames ( ) ) ; List < Flo...
Display the stats of the cluster with per tracker details
32,230
public File getAbsolutePath ( String filename ) { if ( pathenv == null || pathSep == null || fileSep == null ) { return null ; } int val = - 1 ; String classvalue = pathenv + pathSep ; while ( ( ( val = classvalue . indexOf ( pathSep ) ) >= 0 ) && classvalue . length ( ) > 0 ) { String entry = classvalue . substring ( ...
Returns the full path name of this file if it is listed in the path
32,231
private static void printSystemProperties ( ) { System . out . println ( "System properties: " ) ; java . util . Properties p = System . getProperties ( ) ; java . util . Enumeration keys = p . keys ( ) ; while ( keys . hasMoreElements ( ) ) { String thiskey = ( String ) keys . nextElement ( ) ; String value = p . getP...
prints all system properties for this process
32,232
public void downloadToLocalFile ( final File file ) throws InvocationTargetException , InterruptedException { PlatformUI . getWorkbench ( ) . getProgressService ( ) . busyCursorWhile ( new IRunnableWithProgress ( ) { public void run ( IProgressMonitor monitor ) throws InvocationTargetException { DFSFile . this . downlo...
Download this file to the local file system . This creates a download status monitor .
32,233
public String toDetailedString ( ) { final String [ ] units = { "b" , "Kb" , "Mb" , "Gb" , "Tb" } ; int unit = 0 ; double l = this . length ; while ( ( l >= 1024.0 ) && ( unit < units . length ) ) { unit += 1 ; l /= 1024.0 ; } return String . format ( "%s (%.1f %s, r%d)" , super . toString ( ) , l , units [ unit ] , th...
Provides a detailed string for this file
32,234
public void downloadToLocalFile ( IProgressMonitor monitor , File file ) throws InvocationTargetException { final int taskSize = 1024 ; monitor . setTaskName ( "Download file " + this . path ) ; BufferedOutputStream ostream = null ; DataInputStream istream = null ; try { istream = getDFS ( ) . open ( this . path ) ; os...
Download the DfsFile to a local file . Use the given monitor to report status of operation .
32,235
public void upload ( IProgressMonitor monitor , File file ) { final int taskSize = 1024 ; monitor . setTaskName ( "Upload file " + this . path ) ; BufferedInputStream istream = null ; DataOutputStream ostream = null ; try { istream = new BufferedInputStream ( new FileInputStream ( file ) ) ; ostream = getDFS ( ) . crea...
Upload a local file to this file on the distributed file system
32,236
public static DatanodeStatus getDatanodeStats ( FSNamesystem ns , ArrayList < DatanodeDescriptor > live , ArrayList < DatanodeDescriptor > dead ) { ns . DFSNodesStatus ( live , dead ) ; ArrayList < DatanodeDescriptor > decommissioning = ns . getDecommissioningNodesList ( live ) ; int numLive = live . size ( ) ; int num...
Get status of the datanodes in the system .
32,237
public void finish ( ) throws IOException { if ( finished ) { return ; } finished = true ; sum . writeValue ( barray , 0 , false ) ; out . write ( barray , 0 , sum . getChecksumSize ( ) ) ; out . flush ( ) ; }
Finishes writing data to the output stream by writing the checksum bytes to the end . The underlying stream is not closed .
32,238
public boolean hardLink ( Path src , Path dst ) throws IOException { return fs . hardLink ( src , dst ) ; }
hard link Path dst to Path src . Can take place on DFS .
32,239
public boolean rename ( Path src , Path dst ) throws IOException { return fs . rename ( src , dst ) ; }
Renames Path src to Path dst . Can take place on local fs or remote DFS .
32,240
public void copyFromLocalFile ( boolean delSrc , boolean overwrite , Path [ ] srcs , Path dst ) throws IOException { fs . copyFromLocalFile ( delSrc , overwrite , srcs , dst ) ; }
The src files are on the local disk . Add it to FS at the given dst name . delSrc indicates if the source should be removed
32,241
public void copyToLocalFile ( boolean delSrc , Path src , Path dst ) throws IOException { fs . copyToLocalFile ( delSrc , src , dst ) ; }
The src file is under FS and the dst is on the local disk . Copy it from FS control to the local dst name . delSrc indicates if the src will be removed or not .
32,242
public Path startLocalOutput ( Path fsOutputFile , Path tmpLocalFile ) throws IOException { return fs . startLocalOutput ( fsOutputFile , tmpLocalFile ) ; }
Returns a local File that the user can write output to . The caller provides both the eventual FS target name and the local working file . If the FS is local we write directly into the target . If the FS is remote we write into the tmp local area .
32,243
public void completeLocalOutput ( Path fsOutputFile , Path tmpLocalFile ) throws IOException { fs . completeLocalOutput ( fsOutputFile , tmpLocalFile ) ; }
Called when we re all done writing to the target . A local FS will do nothing because we ve written to exactly the right place . A remote FS will copy the contents of tmpLocalFile to the correct target at fsOutputFile .
32,244
public void insert ( EventRecord er ) { SerializedRecord sr = new SerializedRecord ( er ) ; try { Anonymizer . anonymize ( sr ) ; } catch ( Exception e ) { e . printStackTrace ( ) ; } append ( sr ) ; }
Insert an EventRecord to the local storage after it gets serialized and anonymized .
32,245
public static StringBuffer pack ( SerializedRecord sr ) { StringBuffer sb = new StringBuffer ( ) ; ArrayList < String > keys = new ArrayList < String > ( sr . fields . keySet ( ) ) ; if ( sr . isValid ( ) ) SerializedRecord . arrangeKeys ( keys ) ; for ( int i = 0 ; i < keys . size ( ) ; i ++ ) { String value = sr . fi...
Pack a SerializedRecord into an array of bytes
32,246
public void upload ( ) { try { writer . flush ( ) ; if ( compress ) zipCompress ( filename ) ; String remoteName = "failmon-" ; if ( "true" . equalsIgnoreCase ( Environment . getProperty ( "anonymizer.hash.hostnames" ) ) ) remoteName += Anonymizer . getMD5Hash ( InetAddress . getLocalHost ( ) . getCanonicalHostName ( )...
Upload the local file store into HDFS after compressing it . Then a new local file is created as a temporary record store .
32,247
public static void zipCompress ( String filename ) throws IOException { FileOutputStream fos = new FileOutputStream ( filename + COMPRESSION_SUFFIX ) ; CheckedOutputStream csum = new CheckedOutputStream ( fos , new CRC32 ( ) ) ; ZipOutputStream out = new ZipOutputStream ( new BufferedOutputStream ( csum ) ) ; out . set...
Compress a text file using the ZIP compressing algorithm .
32,248
public static void copyToHDFS ( String localFile , String hdfsFile ) throws IOException { String hadoopConfPath ; if ( Environment . getProperty ( "hadoop.conf.path" ) == null ) hadoopConfPath = "../../../conf" ; else hadoopConfPath = Environment . getProperty ( "hadoop.conf.path" ) ; Configuration hadoopConf = new Con...
Copy a local file to HDFS
32,249
public static String getMD5Hash ( String text ) { MessageDigest md ; byte [ ] md5hash = new byte [ 32 ] ; try { md = MessageDigest . getInstance ( "MD5" ) ; md . update ( text . getBytes ( "iso-8859-1" ) , 0 , text . length ( ) ) ; md5hash = md . digest ( ) ; } catch ( NoSuchAlgorithmException e ) { e . printStackTrace...
Create the MD5 digest of an input text .
32,250
static void writeInfo ( String parent , HdfsFileStatus i , XMLOutputter doc ) throws IOException { final SimpleDateFormat ldf = df . get ( ) ; doc . startTag ( i . isDir ( ) ? "directory" : "file" ) ; doc . attribute ( "path" , i . getFullPath ( new Path ( parent ) ) . toUri ( ) . getPath ( ) ) ; doc . attribute ( "mod...
Write a node to output . Node information includes path modification permission owner and group . For files it also includes size replication and block - size .
32,251
protected Map < String , String > buildRoot ( HttpServletRequest request , XMLOutputter doc ) { final String path = request . getPathInfo ( ) != null ? request . getPathInfo ( ) : "/" ; final String exclude = request . getParameter ( "exclude" ) != null ? request . getParameter ( "exclude" ) : "\\..*\\.crc" ; final Str...
Build a map from the query string setting values and defaults .
32,252
private boolean shouldSubmitMove ( PolicyInfo policy , Map < String , Integer > nodeToNumBlocks , List < BlockInfo > stripeBlocks ) { if ( policy == null ) { return true ; } int targetRepl = Integer . parseInt ( policy . getProperty ( "targetReplication" ) ) ; int parityRepl = Integer . parseInt ( policy . getProperty ...
We will not submit more block move if the namenode hasn t deleted the over replicated blocks yet .
32,253
protected synchronized void incrNumBlockFixSimulationSuccess ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . blockFixSimulationSuccess . inc ( incr ) ; numBlockFixSimulati...
Increments the number of new code file fixing verification successes
32,254
protected synchronized void incrNumBlockFixSimulationFailures ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . blockFixSimulationFailures . inc ( incr ) ; numBlockFixSimula...
Increments the number of new code file fixing verification failures
32,255
protected synchronized void incrFileFixFailures ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . fileFixFailures . inc ( incr ) ; numFileFixFailures += incr ; }
Increments the number of corrupt file fixing failures .
32,256
protected synchronized void incrFilesFixed ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . filesFixed . inc ( incr ) ; numFilesFixed += incr ; }
Increments the number of corrupt files that have been fixed by this integrity monitor .
32,257
protected synchronized void incrFileFixReadBytesRemoteRack ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . numFileFixReadBytesRemoteRack . inc ( incr ) ; numfileFixBytesRe...
Increments the number of bytes read from remote racks during file fix operations of this integrity monitor
32,258
protected synchronized void incrFileCopyFailures ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . fileCopyFailures . inc ( incr ) ; numFileCopyFailures += incr ; }
Increments the number of decommissioning file copy failures .
32,259
protected synchronized void incrFilesCopied ( long incr ) { if ( incr < 0 ) { throw new IllegalArgumentException ( "Cannot increment by negative value " + incr ) ; } RaidNodeMetrics . getInstance ( RaidNodeMetrics . DEFAULT_NAMESPACE_ID ) . filesCopied . inc ( incr ) ; numFilesCopied += incr ; }
Increments the number of decommissioning files that have been copied by this integrity monitor .
32,260
synchronized void open ( ) throws IOException { if ( syncer == null ) { syncer = new SyncThread ( ) ; syncThread = new Thread ( syncer ) ; syncThread . start ( ) ; } if ( state != State . BETWEEN_LOG_SEGMENTS ) throw new IOException ( "Bad state: " + state ) ; startLogSegment ( getLastWrittenTxId ( ) + 1 , true ) ; if ...
Create empty edit log files . Initialize the output stream for logging .
32,261
public void logSyncAll ( ) throws IOException { synchronized ( this ) { TransactionId id = myTransactionId . get ( ) ; id . txid = txid ; } logSync ( ) ; }
Blocks until all ongoing edits have been synced to disk . This differs from logSync in that it waits for edits that have been written by other threads not just edits from the calling thread .
32,262
public void logSyncIfNeeded ( ) { boolean doSync = false ; synchronized ( this ) { if ( txid > synctxid + maxBufferedTransactions ) { FSNamesystem . LOG . info ( "Out of band log sync triggered " + " because there are " + ( txid - synctxid ) + " buffered transactions which " + " is more than the configured limit of " +...
if there are too many transactions that are yet to be synced then sync them . Otherwise the in - memory buffer that keeps the transactions would grow to be very very big . This can happen when there are a large number of listStatus calls which update the access time of files .
32,263
public void logSync ( boolean doWait ) { long syncStart = 0 ; boolean thisThreadSuccess = false ; boolean thisThreadSyncing = false ; EditLogOutputStream logStream = null ; try { synchronized ( this ) { long mytxid = myTransactionId . get ( ) . txid ; myTransactionId . get ( ) . txid = - 1L ; if ( mytxid == - 1 ) { myt...
Sync all modifications done by this thread .
32,264
public void logOpenFile ( String path , INodeFileUnderConstruction newNode ) throws IOException { AddOp op = AddOp . getInstance ( ) ; op . set ( newNode . getId ( ) , path , newNode . getReplication ( ) , newNode . getModificationTime ( ) , newNode . getAccessTime ( ) , newNode . getPreferredBlockSize ( ) , newNode . ...
Add open lease record to edit log . Records the block locations of the last block .
32,265
public void logCloseFile ( String path , INodeFile newNode ) { CloseOp op = CloseOp . getInstance ( ) ; op . set ( newNode . getId ( ) , path , newNode . getReplication ( ) , newNode . getModificationTime ( ) , newNode . getAccessTime ( ) , newNode . getPreferredBlockSize ( ) , newNode . getBlocks ( ) , newNode . getPe...
Add close lease record to edit log .
32,266
public void logAppendFile ( String path , INodeFileUnderConstruction newNode ) throws IOException { AppendOp op = AppendOp . getInstance ( ) ; op . set ( path , newNode . getBlocks ( ) , newNode . getClientName ( ) , newNode . getClientMachine ( ) ) ; logEdit ( op ) ; }
Add append file record to the edit log .
32,267
public void logMkDir ( String path , INode newNode ) { MkdirOp op = MkdirOp . getInstance ( ) ; op . set ( newNode . getId ( ) , path , newNode . getModificationTime ( ) , newNode . getPermissionStatus ( ) ) ; logEdit ( op ) ; }
Add create directory record to edit log
32,268
public void logHardLink ( String src , String dst , long timestamp ) { HardLinkOp op = HardLinkOp . getInstance ( ) ; op . set ( src , dst , timestamp ) ; logEdit ( op ) ; }
Add hardlink record to edit log
32,269
public void logRename ( String src , String dst , long timestamp ) { RenameOp op = RenameOp . getInstance ( ) ; op . set ( src , dst , timestamp ) ; logEdit ( op ) ; }
Add rename record to edit log
32,270
public void logSetReplication ( String src , short replication ) { SetReplicationOp op = SetReplicationOp . getInstance ( ) ; op . set ( src , replication ) ; logEdit ( op ) ; }
Add set replication record to edit log
32,271
public void logSetQuota ( String src , long nsQuota , long dsQuota ) { SetQuotaOp op = SetQuotaOp . getInstance ( ) ; op . set ( src , nsQuota , dsQuota ) ; logEdit ( op ) ; }
Add set namespace quota record to edit log
32,272
public void logSetPermissions ( String src , FsPermission permissions ) { SetPermissionsOp op = SetPermissionsOp . getInstance ( ) ; op . set ( src , permissions ) ; logEdit ( op ) ; }
Add set permissions record to edit log
32,273
public void logSetOwner ( String src , String username , String groupname ) { SetOwnerOp op = SetOwnerOp . getInstance ( ) ; op . set ( src , username , groupname ) ; logEdit ( op ) ; }
Add set owner record to edit log
32,274
public void logDelete ( String src , long timestamp ) { DeleteOp op = DeleteOp . getInstance ( ) ; op . set ( src , timestamp ) ; logEdit ( op ) ; }
Add delete file record to edit log
32,275
public void logGenerationStamp ( long genstamp ) { SetGenstampOp op = SetGenstampOp . getInstance ( ) ; op . set ( genstamp ) ; logEdit ( op ) ; }
Add generation stamp record to edit log
32,276
public void logTimes ( String src , long mtime , long atime ) { TimesOp op = TimesOp . getInstance ( ) ; op . set ( src , mtime , atime ) ; logEdit ( op ) ; }
Add access time record to edit log
32,277
synchronized long rollEditLog ( ) throws IOException { LOG . info ( "Rolling edit logs." ) ; long start = System . nanoTime ( ) ; endCurrentLogSegment ( true ) ; long nextTxId = getLastWrittenTxId ( ) + 1 ; startLogSegment ( nextTxId , true ) ; assert curSegmentTxId == nextTxId ; long rollTime = DFSUtil . getElapsedTim...
Finalizes the current edit log and opens a new log segment .
32,278
synchronized void startLogSegment ( final long segmentTxId , boolean writeHeaderTxn ) throws IOException { LOG . info ( "Starting log segment at " + segmentTxId ) ; if ( segmentTxId < 0 ) { throw new IOException ( "Bad txid: " + segmentTxId ) ; } if ( state != State . BETWEEN_LOG_SEGMENTS ) { throw new IOException ( "B...
Start writing to the log segment with the given txid . Transitions from BETWEEN_LOG_SEGMENTS state to IN_LOG_SEGMENT state .
32,279
synchronized void endCurrentLogSegment ( boolean writeEndTxn ) throws IOException { LOG . info ( "Ending log segment " + curSegmentTxId ) ; if ( state != State . IN_SEGMENT ) { throw new IllegalStateException ( "Bad state: " + state ) ; } waitForSyncToFinish ( ) ; if ( writeEndTxn ) { logEdit ( LogSegmentOp . getInstan...
Finalize the current log segment . Transitions from IN_SEGMENT state to BETWEEN_LOG_SEGMENTS state .
32,280
public void purgeLogsOlderThan ( final long minTxIdToKeep ) { synchronized ( this ) { assert curSegmentTxId == HdfsConstants . INVALID_TXID || minTxIdToKeep <= curSegmentTxId : "cannot purge logs older than txid " + minTxIdToKeep + " when current segment starts at " + curSegmentTxId ; try { journalSet . purgeLogsOlderT...
Archive any log files that are older than the given txid .
32,281
public synchronized boolean selectInputStreams ( Collection < EditLogInputStream > streams , long fromTxId , long toAtLeastTxId , int minRedundancy ) throws IOException { if ( journalSet . hasUnfinalizedSegments ( fromTxId ) ) { LOG . fatal ( "All streams should be finalized" ) ; throw new IOException ( "All streams sh...
Select a list of input streams to load .
32,282
static void closeAllStreams ( Iterable < EditLogInputStream > streams ) { for ( EditLogInputStream s : streams ) { IOUtils . closeStream ( s ) ; } }
Close all the streams in a collection
32,283
static Class < ? extends JournalManager > getJournalClass ( Configuration conf , String uriScheme ) { String key = "dfs.name.edits.journal-plugin" + "." + uriScheme ; Class < ? extends JournalManager > clazz = null ; try { clazz = conf . getClass ( key , null , JournalManager . class ) ; } catch ( RuntimeException re )...
Retrieve the implementation class for a Journal scheme .
32,284
public static JournalManager createJournal ( Configuration conf , URI uri , NamespaceInfo nsInfo , NameNodeMetrics metrics ) { Class < ? extends JournalManager > clazz = getJournalClass ( conf , uri . getScheme ( ) ) ; try { Constructor < ? extends JournalManager > cons = clazz . getConstructor ( Configuration . class ...
Construct a custom journal manager . The class to construct is taken from the configuration .
32,285
public float getProgress ( ) throws IOException { long subprogress = 0 ; if ( null != curReader ) { subprogress = ( long ) ( curReader . getProgress ( ) * split . getLength ( idx - 1 ) ) ; } return Math . min ( 1.0f , ( progress + subprogress ) / ( float ) ( split . getLength ( ) ) ) ; }
return progress based on the amount of data processed so far .
32,286
protected boolean initNextRecordReader ( ) throws IOException { if ( curReader != null ) { curReader . close ( ) ; curReader = null ; if ( idx > 0 ) { progress += split . getLength ( idx - 1 ) ; } } if ( idx == split . getNumPaths ( ) ) { return false ; } try { curReader = rrConstructor . newInstance ( new Object [ ] {...
Get the record reader for the next chunk in this CombineFileSplit .
32,287
public void setupJob ( JobContext context ) throws IOException { if ( outputPath != null ) { Path tmpDir = new Path ( outputPath , FileOutputCommitter . TEMP_DIR_NAME ) ; FileSystem fileSys = tmpDir . getFileSystem ( context . getConfiguration ( ) ) ; if ( ! fileSys . mkdirs ( tmpDir ) ) { LOG . error ( "Mkdirs failed ...
Create the temporary directory that is the root of all of the task work directories .
32,288
public void commitJob ( JobContext context ) throws IOException { cleanupJob ( context ) ; if ( shouldMarkOutputDir ( context . getConfiguration ( ) ) ) { markOutputDirSuccessful ( context ) ; } }
Delete the temporary directory including all of the work directories . This is called for all jobs whose final run state is SUCCEEDED
32,289
public void abortJob ( JobContext context , JobStatus . State state ) throws IOException { cleanupJob ( context ) ; }
Delete the temporary directory including all of the work directories .
32,290
public void commitTask ( TaskAttemptContext context ) throws IOException { TaskAttemptID attemptId = context . getTaskAttemptID ( ) ; if ( workPath != null ) { context . progress ( ) ; if ( outputFileSystem . exists ( workPath ) ) { moveTaskOutputs ( context , outputFileSystem , outputPath , workPath ) ; if ( ! outputF...
Move the files from the work directory to the job output directory
32,291
private void moveTaskOutputs ( TaskAttemptContext context , FileSystem fs , Path jobOutputDir , Path taskOutput ) throws IOException { TaskAttemptID attemptId = context . getTaskAttemptID ( ) ; context . progress ( ) ; if ( fs . isFile ( taskOutput ) ) { Path finalOutputPath = getFinalPath ( jobOutputDir , taskOutput ,...
Move all of the files from the work directory to the final output
32,292
public void abortTask ( TaskAttemptContext context ) throws IOException { try { if ( workPath != null ) { context . progress ( ) ; if ( ! outputFileSystem . delete ( workPath , true ) ) { LOG . warn ( "Deleting output in " + workPath + " returns false" ) ; } } } catch ( IOException ie ) { LOG . warn ( "Error discarding...
Delete the work directory
32,293
private Path getFinalPath ( Path jobOutputDir , Path taskOutput , Path taskOutputPath ) throws IOException { URI taskOutputUri = taskOutput . toUri ( ) ; URI relativePath = taskOutputPath . toUri ( ) . relativize ( taskOutputUri ) ; if ( taskOutputUri == relativePath ) { throw new IOException ( "Can not get the relativ...
Find the final name of a given output file given the job output directory and the work directory .
32,294
public boolean needsTaskCommit ( TaskAttemptContext context ) throws IOException { return workPath != null && outputFileSystem . exists ( workPath ) ; }
Did this task write any files in the work directory?
32,295
private void readNameToNode ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "nameToNode" ) ; coronaSerializer . readStartObjectToken ( "nameToNode" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String nodeName = coronaS...
Reads the nameToNode map from the JSON stream
32,296
private void readHostsToSessions ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "hostsToSessions" ) ; coronaSerializer . readStartObjectToken ( "hostsToSessions" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String hos...
Reads the hostsToSessions map from the JSON stream
32,297
private void readNameToApps ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "nameToApps" ) ; coronaSerializer . readStartObjectToken ( "nameToApps" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String nodeName = coronaS...
Reads the nameToApps map from the JSON stream
32,298
public boolean existRunnableNodes ( ResourceType type ) { RunnableIndices r = typeToIndices . get ( type ) ; return r . existRunnableNodes ( ) ; }
See if there are any runnable nodes of a given type
32,299
public ClusterNode getRunnableNode ( String host , LocalityLevel maxLevel , ResourceType type , Set < String > excluded ) { if ( host == null ) { RunnableIndices r = typeToIndices . get ( type ) ; return r . getRunnableNodeForAny ( excluded ) ; } RequestedNode node = resolve ( host , type ) ; return getRunnableNode ( n...
Find the best matching node for this host subject to the maxLevel constraint