idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
32,000
private EventRecord readGroup ( EventRecord er , StringBuffer sb , String prefix ) { Pattern pattern = Pattern . compile ( ".*(" + prefix + "\\s*\\d*)\\s*:\\s*(\\+?\\d+)" , Pattern . MULTILINE ) ; Matcher matcher = pattern . matcher ( sb ) ; while ( matcher . find ( ) ) er . set ( matcher . group ( 1 ) , matcher . grou...
Reads and parses lines that provide the output of a group of sensors with the same functionality .
32,001
public static boolean isChecksumFile ( Path file ) { String name = file . getName ( ) ; return name . startsWith ( "." ) && name . endsWith ( ".crc" ) ; }
Return true iff file is a checksum file name .
32,002
public void copyToLocalFile ( Path src , Path dst , boolean copyCrc ) throws IOException { if ( ! fs . isDirectory ( src ) ) { fs . copyToLocalFile ( src , dst ) ; FileSystem localFs = getLocal ( getConf ( ) ) . getRawFileSystem ( ) ; if ( localFs . isDirectory ( dst ) ) { dst = new Path ( dst , src . getName ( ) ) ; }...
The src file is under FS and the dst is on the local disk . Copy it from FS control to the local dst name . If src and dst are directories the copyCrc parameter determines whether to copy CRC files .
32,003
public boolean reportChecksumFailure ( Path f , FSDataInputStream in , long inPos , FSDataInputStream sums , long sumsPos ) { return false ; }
Report a checksum error to the file system .
32,004
public CRC32 recoverParityBlockToStream ( FileSystem fs , FileStatus srcStat , long blockSize , Path parityFile , long corruptOffset , OutputStream out , Progressable progress ) throws IOException { LOG . info ( "Recovering parity block" + parityFile + ":" + corruptOffset ) ; Path srcFile = srcStat . getPath ( ) ; corr...
Recovers a corrupt block in a parity file to a local file .
32,005
void encodeStripe ( InputStream [ ] blocks , long blockSize , OutputStream [ ] outs , CRC32 [ ] crcOuts , Progressable reporter , boolean computeSrcChecksum , List < Integer > errorLocations ) throws IOException { configureBuffers ( blockSize ) ; int boundedBufferCapacity = 1 ; ParallelStreamReader parallelReader = new...
Wraps around encodeStripeImpl in order to configure buffers . Having buffers of the right size is extremely important . If the buffer size is not a divisor of the block size we may end up reading across block boundaries .
32,006
public static void writeStringOpt ( DataOutput out , String str ) throws IOException { if ( str == null ) { WritableUtils . writeVInt ( out , NULL_STRING_LENGTH ) ; return ; } final int len = str . length ( ) ; TempArrays ta = UTF8 . getArrays ( len ) ; byte [ ] rawBytes = ta . byteArray ; char [ ] charArray = ta . cha...
Writes the string to the output if possible the encoding part is optimized .
32,007
public EventRecord query ( String device ) throws UnknownHostException { StringBuffer sb = Environment . runCommand ( "/sbin/ifconfig " + device ) ; EventRecord retval = new EventRecord ( InetAddress . getLocalHost ( ) . getCanonicalHostName ( ) , InetAddress . getAllByName ( InetAddress . getLocalHost ( ) . getHostNam...
Reads and parses the output of ifconfig for a specified NIC and creates an appropriate EventRecord that holds the desirable information for it .
32,008
public boolean next ( K key , TupleWritable value ) throws IOException { if ( jc . flush ( value ) ) { WritableUtils . cloneInto ( key , jc . key ( ) ) ; return true ; } jc . clear ( ) ; K iterkey = createKey ( ) ; final PriorityQueue < ComposableRecordReader < K , ? > > q = getRecordReaderQueue ( ) ; while ( ! q . isE...
Emit the next set of key value pairs as defined by the child RecordReaders and operation associated with this composite RR .
32,009
public void store ( JobInProgress job ) { if ( active && retainTime > 0 ) { JobID jobId = job . getStatus ( ) . getJobID ( ) ; Path jobStatusFile = getInfoFilePath ( jobId ) ; try { FSDataOutputStream dataOut = fs . create ( jobStatusFile ) ; job . getStatus ( ) . write ( dataOut ) ; job . getProfile ( ) . write ( data...
Persists a job in DFS .
32,010
public JobStatus readJobStatus ( JobID jobId ) { JobStatus jobStatus = null ; if ( null == jobId ) { LOG . warn ( "Could not read job status for null jobId" ) ; return null ; } if ( active ) { try { FSDataInputStream dataIn = getJobInfoFile ( jobId ) ; if ( dataIn != null ) { jobStatus = readJobStatus ( dataIn ) ; data...
This method retrieves JobStatus information from DFS stored using store method .
32,011
public JobProfile readJobProfile ( JobID jobId ) { JobProfile jobProfile = null ; if ( active ) { try { FSDataInputStream dataIn = getJobInfoFile ( jobId ) ; if ( dataIn != null ) { readJobStatus ( dataIn ) ; jobProfile = readJobProfile ( dataIn ) ; dataIn . close ( ) ; } } catch ( IOException ex ) { LOG . warn ( "Coul...
This method retrieves JobProfile information from DFS stored using store method .
32,012
public Counters readCounters ( JobID jobId ) { Counters counters = null ; if ( active ) { try { FSDataInputStream dataIn = getJobInfoFile ( jobId ) ; if ( dataIn != null ) { readJobStatus ( dataIn ) ; readJobProfile ( dataIn ) ; counters = readCounters ( dataIn ) ; dataIn . close ( ) ; } } catch ( IOException ex ) { LO...
This method retrieves Counters information from DFS stored using store method .
32,013
public TaskCompletionEvent [ ] readJobTaskCompletionEvents ( JobID jobId , int fromEventId , int maxEvents ) { TaskCompletionEvent [ ] events = TaskCompletionEvent . EMPTY_ARRAY ; if ( active ) { try { FSDataInputStream dataIn = getJobInfoFile ( jobId ) ; if ( dataIn != null ) { readJobStatus ( dataIn ) ; readJobProfil...
This method retrieves TaskCompletionEvents information from DFS stored using store method .
32,014
public static int findBytes ( byte [ ] utf , int start , int end , byte [ ] b ) { int matchEnd = end - b . length ; for ( int i = start ; i <= matchEnd ; i ++ ) { boolean matched = true ; for ( int j = 0 ; j < b . length ; j ++ ) { if ( utf [ i + j ] != b [ j ] ) { matched = false ; break ; } } if ( matched ) { return ...
Find the first occurrence of the given bytes b in a UTF - 8 encoded string
32,015
public String getStatus ( ) { StringBuffer s = new StringBuffer ( ) ; s . append ( "Maps : " + completedMaps + "/" + totalMaps ) ; s . append ( " (" + mapProgress + ")" ) ; s . append ( " Reduces : " + completedReduces + "/" + totalReduces ) ; s . append ( " (" + reduceProgress + ")" ) ; return s . toString ( ) ; }
Returns a string representation of this job status
32,016
void update ( JobStatus status ) { this . status = status ; try { this . counters = running . getCounters ( ) ; this . completed = running . isComplete ( ) ; this . successful = running . isSuccessful ( ) ; this . mapProgress = running . mapProgress ( ) ; this . reduceProgress = running . reduceProgress ( ) ; } catch (...
Update this job status according to the given JobStatus
32,017
private synchronized void setFlush ( boolean immediateFlush ) { try { Set < FileAppender > flushedFileAppenders = new HashSet < FileAppender > ( ) ; Enumeration < ? > currentLoggers = LogManager . getLoggerRepository ( ) . getCurrentLoggers ( ) ; while ( currentLoggers . hasMoreElements ( ) ) { Object nextLogger = curr...
Set immediateFlush property for all file appenders .
32,018
public static void readState ( String fname ) { filename = fname ; try { persData . load ( new FileInputStream ( filename ) ) ; } catch ( FileNotFoundException e1 ) { } catch ( IOException e ) { e . printStackTrace ( ) ; } }
Read the state of parsing for all open log files from a property file .
32,019
public static ParseState getState ( String fname ) { String [ ] fields = persData . getProperty ( fname , "null" + SEPARATOR + "0" ) . split ( SEPARATOR , 2 ) ; String firstLine ; long offset ; if ( fields . length < 2 ) { System . err . println ( "Malformed persistent state data found" ) ; Environment . logInfo ( "Mal...
Read and return the state of parsing for a particular log file .
32,020
public static void setState ( ParseState state ) { if ( state == null ) { System . err . println ( "Null state found" ) ; Environment . logInfo ( "Null state found" ) ; } persData . setProperty ( state . filename , state . firstLine + SEPARATOR + state . offset ) ; }
Set the state of parsing for a particular log file .
32,021
public static void updateState ( String filename , String firstLine , long offset ) { ParseState ps = getState ( filename ) ; if ( firstLine != null ) ps . firstLine = firstLine ; ps . offset = offset ; setState ( ps ) ; }
Update the state of parsing for a particular log file .
32,022
public static void writeState ( String fname ) { try { persData . store ( new FileOutputStream ( fname ) , Calendar . getInstance ( ) . getTime ( ) . toString ( ) ) ; } catch ( FileNotFoundException e1 ) { e1 . printStackTrace ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } }
Write the state of parsing for all open log files to a property file on disk .
32,023
protected String getCountQuery ( ) { if ( dbConf . getInputCountQuery ( ) != null ) { return dbConf . getInputCountQuery ( ) ; } StringBuilder query = new StringBuilder ( ) ; query . append ( "SELECT COUNT(*) FROM " + tableName ) ; if ( conditions != null && conditions . length ( ) > 0 ) query . append ( " WHERE " + co...
Returns the query for getting the total number of rows subclasses can override this for custom behaviour .
32,024
public void delete ( ) { try { getDFS ( ) . delete ( this . path , true ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; MessageDialog . openWarning ( null , "Delete file" , "Unable to delete file \"" + this . path + "\"\n" + e ) ; } }
Does a recursive delete of the remote directory tree at this node .
32,025
DistributedFileSystem getDFS ( ) throws IOException { if ( this . dfs == null ) { FileSystem fs = location . getDFS ( ) ; if ( ! ( fs instanceof DistributedFileSystem ) ) { ErrorMessageDialog . display ( "DFS Browser" , "The DFS Browser cannot browse anything else " + "but a Distributed File System!" ) ; throw new IOEx...
Gets a connection to the DFS
32,026
int getCap ( int totalRunnableTasks , int localMaxTasks , int totalSlots ) { double load = maxDiff + ( ( double ) totalRunnableTasks ) / totalSlots ; int cap = ( int ) Math . min ( localMaxTasks , Math . ceil ( load * localMaxTasks ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "load:" + load + " maxDiff:" + max...
Determine how many tasks of a given type we want to run on a TaskTracker . This cap is chosen based on how many tasks of that type are outstanding in total so that when the cluster is used below capacity tasks are spread out uniformly across the nodes rather than being clumped up on whichever machines sent out heartbea...
32,027
private void tryReloadingEditLog ( ) throws IOException { LOG . info ( "Segment - trying to reload edit log segment" ) ; sleep ( errorSleepTimeout ) ; checkProgress ( ) ; setupIngestStreamWithRetries ( currentSegmentTxId ) ; refreshStreamPosition ( ) ; }
On error when reading transactions from the stream try reloading the input stream .
32,028
private void updateState ( FSEditLogOp op , boolean checkTxnId ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . SERVERLOGREADER_UPDATE , op ) ; if ( checkTxnId ) { mostRecentlyReadTransactionTxId = ServerLogReaderUtil . checkTransactionId ( mostRecentlyReadTransactionTxId , op ) ; } updateStrea...
For each operation read from the stream check if this is a closing transaction . If so we are sure we need to move to the next segment .
32,029
private void refreshStreamPosition ( ) throws IOException { if ( currentEditLogInputStreamPosition != - 1 ) { currentEditLogInputStream . refresh ( currentEditLogInputStreamPosition , mostRecentlyReadTransactionTxId ) ; } else { currentEditLogInputStreamPosition = currentEditLogInputStream . getPosition ( ) ; } }
Called to refresh the position in the current input stream to the last ACK'd one .
32,030
private void refreshInputStream ( ) throws IOException { if ( currentEditLogInputStream == null ) { LOG . info ( "Segment - setup input stream for txid: " + currentSegmentTxId ) ; setupIngestStreamWithRetries ( currentSegmentTxId ) ; if ( currentEditLogInputStreamPosition == - 1 ) { currentEditLogInputStreamPosition = ...
On normal activity when we reach END_LOG_SEGMENT we will null the stream and at next read we want to instantiate a stream for next segment .
32,031
protected void initialize ( ) throws IOException { for ( int i = 0 ; i < 3 ; i ++ ) { try { LOG . info ( "Detecting current primary node - attempt " + i ) ; detectJournalManager ( ) ; LOG . info ( "Finding oldest segment txid - attempt " + i ) ; currentSegmentTxId = findOldestLogSegmentTxid ( ) ; LOG . info ( "Setting ...
Initialize first stream . Just for startup we are extra careful to try 3 times as there is potential race between finding the txid and then setting up the stream .
32,032
private void setupIngestStreamWithRetries ( long txid ) throws IOException { for ( int i = 0 ; i < inputStreamRetries ; i ++ ) { try { setupCurrentEditStream ( txid ) ; return ; } catch ( IOException e ) { if ( i == inputStreamRetries - 1 ) { throw new IOException ( "Cannot obtain stream for txid: " + txid , e ) ; } LO...
Setup the input stream to be consumed by the reader with retries on failures .
32,033
private void setupCurrentEditStream ( long txid ) throws IOException { currentEditLogInputStream = JournalSet . getInputStream ( remoteJournalManager , txid ) ; currentSegmentTxId = txid ; mostRecentlyReadTransactionTime = now ( ) ; }
Setup the input stream to be consumed by the reader . The input stream corresponds to a single segment .
32,034
boolean segmentExists ( long txid ) throws IOException { List < RemoteEditLog > segments = getManifest ( ) ; for ( RemoteEditLog segment : segments ) { if ( segment . getStartTxId ( ) == txid ) { return true ; } } return false ; }
Check if a segment of a given txid exists in the underlying storage directory . When the reader cannot read any new data it will periodically check if there was some unclean shutdown which results in an unfinalized log .
32,035
List < RemoteEditLog > getManifest ( ) throws IOException { RemoteEditLogManifest rm = remoteJournalManager . getEditLogManifest ( - 1 ) ; if ( rm == null || rm . getLogs ( ) . size ( ) == 0 ) { throw new IOException ( "Cannot obtain the list of log segments" ) ; } return rm . getLogs ( ) ; }
Get all available log segments present in the underlying storage directory . This function will never return null or empty list of segments - it will throw exception in this case .
32,036
protected void sleep ( long ms ) throws IOException { try { Thread . sleep ( ms ) ; } catch ( InterruptedException e ) { LOG . error ( "Interrupted when sleeping" , e ) ; Thread . currentThread ( ) . interrupt ( ) ; throw new IOException ( "Received interruption" ) ; } }
Sleep for n milliseconds . Throw IOException when interrupted .
32,037
static public CompressionType getCompressionType ( Configuration job ) { String name = job . get ( "io.seqfile.compression.type" ) ; return name == null ? CompressionType . RECORD : CompressionType . valueOf ( name ) ; }
Get the compression type for the reduce outputs
32,038
static public void setCompressionType ( Configuration job , CompressionType val ) { job . set ( "io.seqfile.compression.type" , val . toString ( ) ) ; }
Set the compression type for sequence files .
32,039
K put ( final K name ) { K internal = cache . get ( name ) ; if ( internal != null ) { lookups ++ ; return internal ; } if ( ! initialized ) { UseCount useCount = transientMap . get ( name ) ; if ( useCount != null ) { useCount . increment ( ) ; if ( useCount . get ( ) >= useThreshold ) { promote ( name ) ; } return us...
Add a given name to the cache or track its use count . If the name already exists then the internal value is returned .
32,040
void initialized ( ) { LOG . info ( "initialized with " + size ( ) + " entries " + lookups + " lookups" ) ; this . initialized = true ; transientMap . clear ( ) ; transientMap = null ; }
Mark the name cache as initialized . The use count is no longer tracked and the transient map used for initializing the cache is discarded to save heap space .
32,041
private void promote ( final K name ) { transientMap . remove ( name ) ; cache . put ( name , name ) ; lookups += useThreshold ; }
Promote a frequently used name to the cache
32,042
@ SuppressWarnings ( "unchecked" ) protected final void initialize ( int maxSize ) { size = 0 ; int heapSize = maxSize + 1 ; heap = ( T [ ] ) new Object [ heapSize ] ; this . maxSize = maxSize ; }
Subclass constructors must call this .
32,043
private void checkState ( ) { Preconditions . checkNotNull ( logs ) ; if ( ! contiguous ) return ; RemoteEditLog prev = null ; for ( RemoteEditLog log : logs ) { if ( prev != null ) { if ( log . getStartTxId ( ) <= prev . getEndTxId ( ) ) { throw new IllegalStateException ( "Invalid log manifest:" + this ) ; } } prev =...
Check that the logs are contiguous and non - overlapping sequences of transactions in sorted order
32,044
public double getSortProcessingRate ( long currentTime ) { long timeSpentSorting = 0 ; float progress = 0 ; Phase phase = getPhase ( ) ; long sortFinishTime = getSortFinishTime ( ) ; long shuffleFinishTime = getShuffleFinishTime ( ) ; if ( phase == Phase . SHUFFLE ) { return 0 ; } else if ( getPhase ( ) == Phase . SORT...
for sort phase use the accumulated progress rate as the processing rate
32,045
public double getReduceProcessingRate ( long currentTime ) { Phase phase = getPhase ( ) ; if ( phase != Phase . REDUCE ) { return 0 ; } @ SuppressWarnings ( "deprecation" ) long bytesProcessed = super . getCounters ( ) . findCounter ( Task . Counter . REDUCE_INPUT_BYTES ) . getCounter ( ) ; long timeSpentInReduce = 0 ;...
as the processing rate
32,046
private void readSessions ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "sessions" ) ; coronaSerializer . readStartObjectToken ( "sessions" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String sessionId = coronaSerial...
Reads back the sessions map from a JSON stream
32,047
public void restoreAfterSafeModeRestart ( ) { if ( ! clusterManager . safeMode ) { return ; } for ( Session session : sessions . values ( ) ) { for ( ResourceRequestInfo resourceRequestInfo : session . idToRequest . values ( ) ) { clusterManager . nodeManager . restoreResourceRequestInfo ( resourceRequestInfo ) ; } ses...
This method rebuilds members related to the SessionManager instance which were not directly persisted themselves .
32,048
public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeFieldName ( "sessions" ) ; jsonGenerator . writeStartObject ( ) ; for ( String sessionId : sessions . keySet ( ) ) { jsonGenerator . writeFieldName ( sessionId ) ; sessions . get ( sessionI...
Used to write the state of the SessionManager instance to disk when we are persisting the state of the ClusterManager
32,049
public Map < PoolInfo , Long > getTypePoolInfoAveFirstWaitMs ( ResourceType type ) { Map < PoolInfo , WaitCount > poolInfoWaitCount = new HashMap < PoolInfo , WaitCount > ( ) ; for ( Session session : sessions . values ( ) ) { synchronized ( session ) { if ( ! session . isDeleted ( ) ) { Long wait = session . getTypeFi...
Get a map of pool infos to average wait times for first resource of a resource type .
32,050
protected DFSClient getDFSClient ( HttpServletRequest request ) throws IOException , InterruptedException { Configuration conf = new Configuration ( masterConf ) ; UnixUserGroupInformation . saveToConf ( conf , UnixUserGroupInformation . UGI_PROPERTY_NAME , getUGI ( request ) ) ; return JspHelper . getDFSClient ( reque...
getting a client for connecting to dfs
32,051
private DatanodeID [ ] getDatanodes ( HttpServletRequest request ) throws IOException { final String datanodes = request . getParameter ( "candidates" ) ; if ( datanodes == null ) { return null ; } final String [ ] datanodeStrs = datanodes . split ( " " ) ; if ( datanodeStrs . length == 0 ) { return null ; } final Data...
Get the datanode candidates from the request
32,052
int loadFSEdits ( EditLogInputStream edits , long lastAppliedTxId ) throws IOException { long startTime = now ( ) ; this . lastAppliedTxId = lastAppliedTxId ; int numEdits = loadFSEdits ( edits , true ) ; FSImage . LOG . info ( "Edits file " + edits . toString ( ) + " of size: " + edits . length ( ) + ", # of edits: " ...
Load an edit log and apply the changes to the in - memory structure This is where we apply edits that we ve been writing to disk all along .
32,053
private void checkFail ( String errorMsg ) throws IOException { if ( fsNamesys . failOnTxIdMismatch ( ) ) { FSEditLog . LOG . error ( errorMsg ) ; throw new IOException ( errorMsg ) ; } MetaRecoveryContext . editLogLoaderPrompt ( errorMsg ) ; }
When encountering an error while loading the transaction we can skip the problematic transaction and continue first prompting the user . This will only be possible when NN is started with appropriate option .
32,054
public static EditLogValidation validateEditLog ( EditLogInputStream in ) { long lastPos = 0 ; long firstTxId = HdfsConstants . INVALID_TXID ; long lastTxId = HdfsConstants . INVALID_TXID ; long numValid = 0 ; try { FSEditLogOp op = null ; while ( true ) { lastPos = in . getPosition ( ) ; try { if ( ( op = in . readOp ...
Return the number of valid transactions in the stream . If the stream is truncated during the header returns a value indicating that there are 0 valid transactions . This reads through the stream but does not close it .
32,055
public void initTasks ( ) throws IOException { jobHistory . logSubmitted ( jobFile . toString ( ) , this . startTime , this . jobTrackerId ) ; JobClient . RawSplit [ ] splits = null ; splits = JobClient . getAndRemoveCachedSplits ( jobId ) ; if ( splits == null ) { FileSystem fs = jobFile . getFileSystem ( jobConf ) ; ...
Read input splits and create a map per split .
32,056
private TaskInProgress removeMatchingTipUnprotectedUnconditional ( List < TaskInProgress > taskList , TaskInProgress intendedTip ) { for ( Iterator < TaskInProgress > iter = taskList . iterator ( ) ; iter . hasNext ( ) ; ) { TaskInProgress t = iter . next ( ) ; if ( t . getTIPId ( ) . equals ( intendedTip . getTIPId ( ...
Removes matching TIP without checking any conditions
32,057
public Task forceNewReduceTaskForTip ( String taskTrackerName , String hostName , TaskInProgress forcedTip ) { synchronized ( lockObject ) { Task result = obtainTaskCleanupTask ( taskTrackerName , forcedTip ) ; if ( result != null ) { return result ; } removeMatchingTipUnprotectedUnconditional ( nonRunningMaps , forced...
Registers new task attempt for given task
32,058
public boolean canTrackerBeUsed ( String taskTracker , String trackerHost , TaskInProgress tip ) { synchronized ( lockObject ) { return ! tip . hasFailedOnMachine ( trackerHost ) ; } }
Can a tracker be used for a TIP?
32,059
private void setJobCleanupTaskState ( Task task ) { if ( task . isJobCleanupTask ( ) ) { if ( jobFailed ) { task . setJobCleanupTaskState ( org . apache . hadoop . mapreduce . JobStatus . State . FAILED ) ; } else if ( jobKilled ) { task . setJobCleanupTaskState ( org . apache . hadoop . mapreduce . JobStatus . State ....
Sets task state according to job state if given task is cleanup one
32,060
public static String [ ] getJournalHttpHosts ( Configuration conf ) { Collection < String > hosts = conf . getStringCollection ( JournalConfigKeys . DFS_JOURNALNODE_HOSTS ) ; int defaultHttpPort = JournalConfigKeys . DFS_JOURNALNODE_HTTP_PORT_DEFAULT ; String [ ] httpAddresses = new String [ hosts . size ( ) ] ; int i ...
Parse the DFS_JOURNALNODE_HOSTS to get the list of Journal Node Hosts
32,061
public static InetSocketAddress getAddress ( Configuration conf ) { String addr = conf . get ( JournalConfigKeys . DFS_JOURNALNODE_HTTP_ADDRESS_KEY , JournalConfigKeys . DFS_JOURNALNODE_HTTP_ADDRESS_DEFAULT ) ; return NetUtils . createSocketAddr ( addr , JournalConfigKeys . DFS_JOURNALNODE_HTTP_PORT_DEFAULT ) ; }
Get the address bound by the JournalNode to start the web server .
32,062
public synchronized boolean addChild ( Node child ) { if ( child . parent != null ) { throw new IllegalArgumentException ( "The child is already under another node:" + child . parent ) ; } checkChildren ( ) ; boolean retval = children . add ( child ) ; if ( retval ) child . parent = this ; return retval ; }
Add a child node to this node .
32,063
static private void updateShares ( Cluster clusters [ ] ) { assert ( clusters . length == 2 ) ; if ( clusters [ 0 ] . runnableMaps == 0 && clusters [ 0 ] . runnableMaps == 0 && clusters [ 1 ] . runnableReduces == 0 && clusters [ 1 ] . runnableReduces == 0 ) { return ; } if ( ! ( clusters [ 0 ] . runnableMaps == 0 && cl...
Update the task share of the clusters
32,064
public void run ( ) { long lastUpdate = - 1L ; while ( running ) { try { Thread . sleep ( updateInterval / 10 ) ; long now = JobTracker . getClock ( ) . getTime ( ) ; if ( now - lastUpdate > updateInterval ) { lastUpdate = now ; doMoveSlots ( clusters ) ; } } catch ( Exception e ) { LOG . error ( "Exception while balan...
Keep moving slots between two clusters according to their runnable tasks . These clusters are assumed to run tasktrackers on the same set of machines
32,065
private int getTotalSlots ( TaskTrackerStatus status , TaskType type ) { Map < Integer , Integer > defaultCpuToMaxSlots = ( type == TaskType . MAP ) ? defaultCpuToMaxMapSlots : defaultCpuToMaxReduceSlots ; int cpus = status . getResourceStatus ( ) . getNumProcessors ( ) ; Integer slots = defaultCpuToMaxSlots . get ( cp...
Obtain the two clusters combined total slots of a tasktracker
32,066
private static FairSchedulerProtocol createClient ( String target , Configuration conf ) throws IOException { InetSocketAddress addr = NetUtils . createSocketAddr ( target ) ; UserGroupInformation ugi = UserGroupInformation . getCurrentUGI ( ) ; LOG . info ( "Connecting to " + addr ) ; return ( FairSchedulerProtocol ) ...
Create a FairScheduler RPC client
32,067
public static void main ( String argv [ ] ) { StringUtils . startupShutdownMessage ( HourGlass . class , argv , LOG ) ; try { HourGlass hourGlass = new HourGlass ( new Configuration ( ) ) ; hourGlass . run ( ) ; } catch ( Throwable e ) { LOG . fatal ( StringUtils . stringifyException ( e ) ) ; System . exit ( - 1 ) ; }...
Start the HourGlass process
32,068
public synchronized boolean reloadConfigsIfNecessary ( ) { long time = RaidNode . now ( ) ; if ( time > lastReloadAttempt + reloadInterval ) { lastReloadAttempt = time ; try { File file = new File ( configFileName ) ; long lastModified = file . lastModified ( ) ; if ( lastModified > lastSuccessfulReload && time > lastM...
Reload config file if it hasn t been loaded in a while Returns true if the file was reloaded .
32,069
void stopReload ( ) throws InterruptedException { if ( reloadThread != null ) { running = false ; reloadThread . interrupt ( ) ; reloadThread . join ( ) ; reloadThread = null ; } }
Stop the background thread that reload the config file
32,070
PolicyInfo getPolicy ( String policyName ) { for ( PolicyInfo policy : allPolicies ) { if ( policyName . equals ( policy . getName ( ) ) ) { return policy ; } } return null ; }
Find the PolicyInfo corresponding to a given policy name
32,071
public void stop ( ) { if ( stopRequested ) { return ; } stopRequested = true ; running = false ; if ( server != null ) server . stop ( ) ; if ( triggerThread != null ) { triggerThread . interrupt ( ) ; triggerMonitor = null ; } if ( urfThread != null ) { urfThread . interrupt ( ) ; urfProcessor = null ; } if ( blockIn...
Stop all RaidNode threads and wait for all to finish .
32,072
public Map < String , Map < CorruptFileStatus , Long > > getCorruptFilesCounterMap ( ) { return ( ( CorruptionWorker ) blockIntegrityMonitor . getCorruptionMonitor ( ) ) . getCorruptFilesCounterMap ( ) ; }
the value is counters of different types of corrupt files
32,073
public PolicyInfo determinePolicy ( Codec codec ) { for ( PolicyInfo info : configMgr . getAllPolicies ( ) ) { if ( ! info . getShouldRaid ( ) ) { continue ; } if ( info . getCodecId ( ) . equals ( codec . id ) ) { return info ; } } return null ; }
Determine a PolicyInfo from the codec to re - generate the parity files of modified source files .
32,074
public static boolean doRaid ( Configuration conf , FileStatus stat , Path destPath , Codec codec , Statistics statistics , Progressable reporter , boolean doSimulate , int targetRepl , int metaRepl ) throws IOException { boolean succeed = false ; for ( EncodingCandidate ec : RaidNode . splitPaths ( conf , codec , stat...
only used by test
32,075
public static boolean raidedByOtherHighPriCodec ( Configuration conf , FileStatus stat , Codec codec ) throws IOException { for ( Codec tcodec : Codec . getCodecs ( ) ) { if ( tcodec . priority > codec . priority ) { if ( stat . isDir ( ) && ! tcodec . isDirRaid ) { continue ; } if ( ParityFilePair . parityExists ( sta...
check if the file is already raided by high priority codec
32,076
private static LOGRESULTS doDirRaid ( Configuration conf , EncodingCandidate ec , Path destPath , Codec codec , Statistics statistics , Progressable reporter , boolean doSimulate , int targetRepl , int metaRepl ) throws IOException { FileStatus stat = ec . srcStat ; Path p = stat . getPath ( ) ; FileSystem srcFs = p . ...
RAID an individual directory
32,077
private static LOGRESULTS doFileRaid ( Configuration conf , EncodingCandidate ec , Path destPath , Codec codec , Statistics statistics , Progressable reporter , boolean doSimulate , int targetRepl , int metaRepl ) throws IOException , InterruptedException { FileStatus stat = ec . srcStat ; Path p = stat . getPath ( ) ;...
RAID an individual file
32,078
public static RaidNode createRaidNode ( Configuration conf ) throws ClassNotFoundException { try { Class < ? > raidNodeClass = conf . getClass ( RAIDNODE_CLASSNAME_KEY , DistRaidNode . class ) ; if ( ! RaidNode . class . isAssignableFrom ( raidNodeClass ) ) { throw new ClassNotFoundException ( "not an implementation of...
Create an instance of the appropriate subclass of RaidNode
32,079
public static RaidNode createRaidNode ( String argv [ ] , Configuration conf ) throws IOException , ClassNotFoundException { if ( conf == null ) { conf = new Configuration ( ) ; } StartupOption startOpt = parseArguments ( argv ) ; if ( startOpt == null ) { printUsage ( ) ; return null ; } setStartupOption ( conf , star...
Create an instance of the RaidNode
32,080
public static String getJobID ( Configuration conf ) { String jobId = conf . get ( "mapred.job.id" , null ) ; if ( jobId == null ) { jobId = "localRaid" + df . format ( new Date ( ) ) ; conf . set ( "mapred.job.id" , jobId ) ; } return jobId ; }
Get the job id from the configuration
32,081
public static String [ ] adjustConf ( String [ ] argv , Configuration conf ) { String [ ] serviceId = new String [ ] { "" } ; String [ ] filteredArgv = DFSUtil . getServiceName ( argv , serviceId ) ; if ( ! serviceId [ 0 ] . equals ( "" ) ) { NameNode . checkServiceName ( conf , serviceId [ 0 ] ) ; DFSUtil . setGeneric...
Adjust configuration for nameservice keys .
32,082
@ SuppressWarnings ( "unchecked" ) static Map < String , Integer > [ ] countCompanionBlocks ( Collection < LocatedBlock > companionBlocks ) { Map < String , Integer > [ ] result = new HashMap [ 2 ] ; result [ 0 ] = new HashMap < String , Integer > ( ) ; result [ 1 ] = new HashMap < String , Integer > ( ) ; for ( Locate...
Count how many companion blocks are on each datanode or each rack
32,083
List < LocatedBlock > getCompanionBlocks ( String path , FileInfo info , Block block , FSInodeInfo inode ) throws IOException { Codec codec = info . codec ; switch ( info . type ) { case NOT_RAID : return Collections . emptyList ( ) ; case HAR_TEMP_PARITY : return getCompanionBlocksForHarParityBlock ( path , codec . pa...
Obtain the companion blocks of the given block . Companion blocks are defined as the blocks that can help recover each other by using the raid decoder .
32,084
NameWithINode getSourceFile ( String parity , String prefix ) throws IOException { if ( isHarFile ( parity ) ) { return null ; } String src = parity . substring ( prefix . length ( ) ) ; byte [ ] [ ] components = INodeDirectory . getPathComponents ( src ) ; INode inode = namesystem . dir . getINode ( components ) ; ret...
Get path for the corresponding source file for a valid parity file . Returns null if it does not exist
32,085
private NameWithINode getParityFile ( Codec codec , String src ) throws IOException { String parity ; if ( codec . isDirRaid ) { String parent = getParentPath ( src ) ; parity = codec . parityDirectory + parent ; } else { parity = codec . parityDirectory + src ; } byte [ ] [ ] components = INodeDirectory . getPathCompo...
Get path for the parity file . Returns null if it does not exist
32,086
protected FileInfo getFileInfo ( FSInodeInfo srcINode , String path ) throws IOException { for ( Codec c : Codec . getCodecs ( ) ) { if ( path . startsWith ( c . tmpHarDirectoryPS ) ) { return new FileInfo ( FileType . HAR_TEMP_PARITY , c ) ; } if ( path . startsWith ( c . tmpParityDirectoryPS ) ) { return new FileInfo...
Return raid information about a file , for example whether this file is a source file , a parity file , or not raided
32,087
/**
 * Returns true only if every subcontext reports that it is monitoring.
 * Every subcontext is queried exactly once — iteration deliberately does
 * not short-circuit, matching the original {@code &=} accumulation.
 *
 * @return true if all subcontexts are monitoring, false otherwise
 */
public boolean isMonitoring ( ) {
  boolean allMonitoring = true ;
  for ( MetricsContext context : subctxt ) {
    if ( ! context . isMonitoring ( ) ) {
      // Record the failure but keep polling the remaining subcontexts.
      allMonitoring = false ;
    }
  }
  return allMonitoring ;
}
Return true if all subcontexts are monitoring .
32,088
public int read ( ) throws IOException { while ( true ) { lockR . lock ( ) ; try { if ( availableCount . get ( ) > 0 ) { int b = bytes [ readCursor ] & 0xFF ; incReadCursor ( 1 ) ; availableCount . decrementAndGet ( ) ; totalRead ++ ; return b ; } else if ( closed ) { return - 1 ; } } finally { lockR . unlock ( ) ; } s...
Read a single byte . Blocks until data is available or fail if buffer is closed .
32,089
public int read ( byte [ ] buf , int off , int len ) throws IOException { while ( true ) { lockR . lock ( ) ; try { int available = availableCount . get ( ) ; if ( available > 0 ) { final int lenToRead = Math . min ( available , len ) ; final int lenForward = Math . min ( lenToRead , length - readCursor ) ; final int l...
Read data to the buffer starting at offset off . Will block until data available or the input has been closed .
32,090
public void write ( byte [ ] buf , int off , int len ) throws IOException { while ( true ) { lockW . lock ( ) ; try { checkClosed ( ) ; final int lenToWrite = Math . min ( len , length - availableCount . get ( ) ) ; final int lenForward = Math . min ( lenToWrite , length - writeCursor ) ; final int lenRemaining = lenTo...
Write buf to the buffer will block until it can write len bytes . Will fail if buffer is closed .
32,091
public void write ( int b ) throws IOException { while ( true ) { lockW . lock ( ) ; try { checkClosed ( ) ; if ( length - availableCount . get ( ) > 0 ) { bytes [ writeCursor ] = ( byte ) b ; incWriteCursor ( 1 ) ; availableCount . incrementAndGet ( ) ; totalWritten ++ ; return ; } } finally { lockW . unlock ( ) ; } s...
Write a single byte to the buffer . Will block until the byte can be written or fail when buffer is closed .
32,092
private static JobConf createJobConf ( Configuration conf ) { JobConf jobconf = new JobConf ( conf , DistRaid . class ) ; jobName = NAME + " " + dateForm . format ( new Date ( RaidNode . now ( ) ) ) ; jobconf . setUser ( RaidNode . JOBUSER ) ; jobconf . setJobName ( jobName ) ; jobconf . setMapSpeculativeExecution ( fa...
create new job conf based on configuration passed .
32,093
/**
 * Queues the given encoding candidates for raiding under the supplied policy.
 *
 * @param info  the raid policy governing these paths
 * @param paths the encoding candidates to be raided
 */
public void addRaidPaths ( PolicyInfo info , List < EncodingCandidate > paths ) {
  RaidPolicyPathPair pair = new RaidPolicyPathPair ( info , paths ) ;
  raidPolicyPathPairList . add ( pair ) ;
}
Add paths to be raided
32,094
public boolean startDistRaid ( ) throws IOException { assert ( raidPolicyPathPairList . size ( ) > 0 ) ; if ( setup ( ) ) { this . jobClient = new JobClient ( jobconf ) ; this . runningJob = this . jobClient . submitJob ( jobconf ) ; LOG . info ( "Job Started: " + runningJob . getID ( ) ) ; this . startTime = System . ...
Invokes a map - reduce job to do parallel raiding .
32,095
public boolean checkComplete ( ) throws IOException { JobID jobID = runningJob . getID ( ) ; if ( runningJob . isComplete ( ) ) { final String jobdir = jobconf . get ( JOB_DIR_LABEL ) ; if ( jobdir != null ) { final Path jobpath = new Path ( jobdir ) ; jobpath . getFileSystem ( jobconf ) . delete ( jobpath , true ) ; }...
Checks if the map - reduce job has completed .
32,096
private boolean setup ( ) throws IOException { estimateSavings ( ) ; final String randomId = getRandomId ( ) ; JobClient jClient = new JobClient ( jobconf ) ; Path jobdir = new Path ( jClient . getSystemDir ( ) , NAME + "_" + randomId ) ; LOG . info ( JOB_DIR_LABEL + "=" + jobdir ) ; jobconf . set ( JOB_DIR_LABEL , job...
Set up the input file which has the list of input files .
32,097
private ArrayList < String > getStrings ( Object o ) { ArrayList < String > retval = new ArrayList < String > ( ) ; retval . clear ( ) ; if ( o == null ) retval . add ( "null" ) ; else if ( o instanceof String ) retval . add ( ( String ) o ) ; else if ( o instanceof Calendar ) retval . add ( dateFormatter . format ( ( ...
Extract String representations from an Object .
32,098
/**
 * Creates a {@link ClientProtocol} proxy to the NameNode whose
 * client-protocol address is resolved from the given configuration.
 * Convenience wrapper that delegates to the two-argument overload.
 *
 * @param conf configuration used to resolve the NameNode address
 * @return a client-side RPC proxy to the NameNode
 * @throws IOException if the proxy cannot be created
 */
public static ClientProtocol createNamenode ( Configuration conf ) throws IOException { return createNamenode ( NameNode . getClientProtocolAddress ( conf ) , conf ) ; }
The locking hierarchy is to first acquire lock on DFSClient object followed by lock on leasechecker followed by lock on an individual DFSOutputStream .
32,099
private void createRPCNamenodeIfCompatible ( InetSocketAddress nameNodeAddr , Configuration conf , UserGroupInformation ugi ) throws IOException { try { this . namenodeProtocolProxy = createRPCNamenode ( nameNodeAddr , conf , ugi , namenodeRPCSocketTimeout ) ; this . rpcNamenode = namenodeProtocolProxy . getProxy ( ) ;...
Create a NameNode proxy for the client if the client and NameNode are compatible