idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
31,800
/**
 * Stores the scheduler and session notifier and registers this object as a
 * metrics updater with the metrics context. Metrics reporting starts only
 * after this is called.
 */
public void registerUpdater ( Scheduler scheduler , SessionNotifier sessionNotifier ) { this . scheduler = scheduler ; this . sessionNotifier = sessionNotifier ; context . registerUpdater ( this ) ; }
Set the scheduler and start the updating. The metrics won't be reported until this is called.
31,801
/**
 * Builds the per-end-state session counters.
 *
 * @return map from each session end state to a time-varying counter named
 *         "&lt;state&gt;_sessions", registered in the metrics registry
 */
private Map<SessionStatus, MetricsTimeVaryingInt> createSessionStatusToMetricsMap() {
  Map<SessionStatus, MetricsTimeVaryingInt> statusToMetric =
      new HashMap<SessionStatus, MetricsTimeVaryingInt>();
  for (SessionStatus endState : SESSION_END_STATES) {
    String metricName = endState.toString().toLowerCase() + "_sessions";
    statusToMetric.put(endState, new MetricsTimeVaryingInt(metricName, registry));
  }
  return statusToMetric;
}
Create a map of session status - > metrics .
31,802
/**
 * Builds a map of resource type -> current-count gauge.
 *
 * @param resourceTypes resource types to create gauges for
 * @param actionType    prefix used in the metric name ("actiontype_type", lowercased)
 * @return map from resource type to its registered gauge
 */
private Map<ResourceType, MetricsIntValue> createTypeToCountMap(
    Collection<ResourceType> resourceTypes, String actionType) {
  Map<ResourceType, MetricsIntValue> typeToGauge =
      new HashMap<ResourceType, MetricsIntValue>();
  for (ResourceType type : resourceTypes) {
    String metricName = (actionType + "_" + type).toLowerCase();
    typeToGauge.put(type, new MetricsIntValue(metricName, registry));
  }
  return typeToGauge;
}
Create a map of resource type - > current count .
31,803
/**
 * Builds a map of resource type -> cumulative counter.
 *
 * @param resourceTypes resource types to create counters for
 * @param actionType    prefix used in the metric name ("actiontype_type", lowercased)
 * @return map from resource type to its registered time-varying counter
 */
private Map<ResourceType, MetricsTimeVaryingLong> createTypeToResourceCountMap(
    Collection<ResourceType> resourceTypes, String actionType) {
  Map<ResourceType, MetricsTimeVaryingLong> typeToCounter =
      new HashMap<ResourceType, MetricsTimeVaryingLong>();
  for (ResourceType type : resourceTypes) {
    String metricName = (actionType + "_" + type).toLowerCase();
    typeToCounter.put(type, new MetricsTimeVaryingLong(metricName, registry));
  }
  return typeToCounter;
}
Create a map of resource type - > cumulative counts .
31,804
/**
 * Weighted random selection over a list of CDFs: a CDF is chosen with
 * probability proportional to its number of values, then a runtime is drawn
 * from it via makeUpRuntime(cdf). Returns -1 when all CDFs are empty.
 * The IllegalStateException branches guard invariants that should be
 * unreachable if the weight arithmetic is correct.
 */
private long makeUpRuntime ( List < LoggedDiscreteCDF > mapAttemptCDFs ) { int total = 0 ; for ( LoggedDiscreteCDF cdf : mapAttemptCDFs ) { total += cdf . getNumberValues ( ) ; } if ( total == 0 ) { return - 1 ; } int index = random . nextInt ( total ) ; for ( LoggedDiscreteCDF cdf : mapAttemptCDFs ) { if ( index >= cdf . getNumberValues ( ) ) { index -= cdf . getNumberValues ( ) ; } else { if ( index < 0 ) { throw new IllegalStateException ( "application error" ) ; } return makeUpRuntime ( cdf ) ; } } throw new IllegalStateException ( "not possible to get here" ) ; }
Perform a weighted random selection on a list of CDFs and produce a random variable using the selected CDF .
31,805
/**
 * Runs the Hadoop record compiler over the configured source file (if any)
 * and every file selected by the nested filesets.
 *
 * @throws BuildException if neither a file attribute nor a fileset child is given
 */
public void execute() throws BuildException {
  if (src == null && filesets.size() == 0) {
    throw new BuildException("There must be a file attribute or a fileset child element");
  }
  if (src != null) {
    doCompile(src);
  }
  Project project = getProject();
  for (FileSet fileSet : filesets) {
    DirectoryScanner scanner = fileSet.getDirectoryScanner(project);
    File baseDir = fileSet.getDir(project);
    for (String included : scanner.getIncludedFiles()) {
      doCompile(new File(baseDir, included));
    }
  }
}
Invoke the Hadoop record compiler on each record definition file
31,806
/**
 * Compares this VLongWritable's value with another's.
 *
 * @param o the other VLongWritable
 * @return -1, 0, or 1 as this value is less than, equal to, or greater than o's
 */
public int compareTo(Object o) {
  long left = this.value;
  long right = ((VLongWritable) o).value;
  if (left < right) {
    return -1;
  }
  return left == right ? 0 : 1;
}
Compares two VLongWritables .
31,807
/**
 * Creates the input file "part1" under dir, containing one comma-separated
 * split prefix per line for the given search depth.
 *
 * Fix: the PrintStream was not closed when a print call threw, leaking the
 * underlying FS stream; the writes are now wrapped in try/finally.
 *
 * @param fs    target filesystem
 * @param dir   directory to create and write into
 * @param pent  puzzle supplying the splits
 * @param depth prefix depth to enumerate
 * @throws IOException on filesystem errors
 */
private static void createInputDirectory(FileSystem fs, Path dir, Pentomino pent, int depth)
    throws IOException {
  fs.mkdirs(dir);
  List<int[]> splits = pent.getSplits(depth);
  PrintStream file =
      new PrintStream(new BufferedOutputStream(fs.create(new Path(dir, "part1")), 64 * 1024));
  try {
    for (int[] prefix : splits) {
      for (int i = 0; i < prefix.length; ++i) {
        if (i != 0) {
          file.print(',');
        }
        file.print(prefix[i]);
      }
      file.print('\n');
    }
  } finally {
    file.close();
  }
}
Create the input file with all of the possible combinations of the given depth .
31,808
/**
 * Deserializes an InetSocketAddress stored as "host:port" under the given
 * conf key. Returns null when the key is absent or the value is not a
 * two-part host:port pair.
 */
public static InetSocketAddress getAddress(JobConf conf, String key) {
  String value = conf.get(key);
  if (value == null) {
    return null;
  }
  String[] parts = value.split(":");
  return parts.length == 2
      ? new InetSocketAddress(parts[0], Integer.parseInt(parts[1]))
      : null;
}
Deserialize an InetSocketAddress from the String stored under the given key in conf.
31,809
/**
 * Serializes the address as "host:port" under the given conf key; a null
 * address removes the key instead.
 */
public static void setAddress(JobConf conf, String key, InetSocketAddress address) {
  if (address == null) {
    conf.unset(key);
    return;
  }
  conf.set(key, address.getHostName() + ":" + address.getPort());
}
Serialize an InetSocketAddress to a String and save it under the given key in conf.
31,810
/** Snapshots the pool-group maximum and minimum from the configuration manager. */
private void snapshotConfig ( ) { maximum = configManager . getPoolGroupMaximum ( getName ( ) , getType ( ) ) ; minimum = configManager . getPoolGroupMinimum ( getName ( ) , getType ( ) ) ; }
Get the snapshot of the configuration from the configuration manager
31,811
/**
 * Lazily builds and returns the queue of pools ordered by the pool group's
 * configured scheduling comparator.
 */
public Queue < PoolSchedulable > getScheduleQueue ( ) { if ( scheduleQueue == null ) { ScheduleComparator sc = configManager . getPoolGroupComparator ( getName ( ) ) ; scheduleQueue = createPoolQueue ( sc ) ; } return scheduleQueue ; }
Get the queue of pools sorted for scheduling
31,812
/**
 * Lazily builds and returns the pool queue ordered for preemption. Only the
 * PRIORITY comparator has a preemption variant; any other configured
 * comparator is rejected as a misconfiguration.
 */
public Queue < PoolSchedulable > getPreemptQueue ( ) { ScheduleComparator sPreempt = null ; if ( preemptQueue == null ) { ScheduleComparator sc = configManager . getPoolGroupComparator ( getName ( ) ) ; if ( sc == ScheduleComparator . PRIORITY ) { sPreempt = ScheduleComparator . PRIORITY_PREEMPT ; } else { throw new IllegalArgumentException ( "Unknown/misconfigured poolgroup" ) ; } preemptQueue = createPoolQueue ( sPreempt ) ; } return preemptQueue ; }
Get the queue of the pool sorted for preemption
31,813
/**
 * Loads all snapshot pools into a priority queue ordered by the given
 * comparator. The initial capacity is floored at 1 because PriorityQueue
 * rejects a capacity of 0.
 */
private Queue<PoolSchedulable> createPoolQueue(ScheduleComparator comparator) {
  int capacity = Math.max(1, snapshotPools.size());
  Queue<PoolSchedulable> queue = new PriorityQueue<PoolSchedulable>(capacity, comparator);
  queue.addAll(snapshotPools);
  return queue;
}
Put all the pools into the priority queue sorted by a comparator
31,814
/**
 * Gets the pool for the given info, creating it atomically if absent
 * (putIfAbsent resolves concurrent creation races by keeping the winner).
 * Pools are never removed from the map.
 */
public PoolSchedulable getPool ( PoolInfo poolInfo ) { PoolSchedulable pool = nameToMap . get ( poolInfo ) ; if ( pool == null ) { pool = new PoolSchedulable ( poolInfo , getType ( ) , configManager ) ; PoolSchedulable prevPool = nameToMap . putIfAbsent ( poolInfo , pool ) ; if ( prevPool != null ) { pool = prevPool ; } } return pool ; }
Get a pool creating it if it does not exist . Note that these pools are never removed .
31,815
/**
 * Returns the configured combiner class for the job, or null when
 * COMBINE_CLASS_ATTR is not set.
 */
@ SuppressWarnings ( "unchecked" ) public Class < ? extends Reducer < ? , ? , ? , ? > > getCombinerClass ( ) throws ClassNotFoundException { return ( Class < ? extends Reducer < ? , ? , ? , ? > > ) conf . getClass ( COMBINE_CLASS_ATTR , null ) ; }
Get the combiner class for the job .
31,816
/**
 * Compares this LongWritable's value with another's.
 *
 * @param o the other LongWritable
 * @return -1, 0, or 1 as this value is less than, equal to, or greater than o's
 */
public int compareTo(Object o) {
  long left = this.value;
  long right = ((LongWritable) o).value;
  if (left < right) {
    return -1;
  }
  return left == right ? 0 : 1;
}
Compares two LongWritables .
31,817
/**
 * Processes job-page actions (kill selected jobs, change their priority)
 * when private actions are enabled in the configuration.
 *
 * Fix: the PRIVATE_ACTIONS_KEY lookup was performed twice per request; it is
 * now read once into a local.
 *
 * @param request  servlet request carrying the action parameters
 * @param response servlet response (unused here, kept for the JSP contract)
 * @param tracker  job tracker to apply the actions to
 * @throws IOException propagated from the tracker operations
 */
public static void processButtons(HttpServletRequest request, HttpServletResponse response,
    JobTracker tracker) throws IOException {
  boolean privateActionsAllowed = conf.getBoolean(PRIVATE_ACTIONS_KEY, false);
  if (privateActionsAllowed && request.getParameter("killJobs") != null) {
    String[] jobs = request.getParameterValues("jobCheckBox");
    if (jobs != null) {
      for (String job : jobs) {
        tracker.killJob(JobID.forName(job));
      }
    }
  }
  if (privateActionsAllowed && request.getParameter("changeJobPriority") != null) {
    String[] jobs = request.getParameterValues("jobCheckBox");
    if (jobs != null) {
      JobPriority jobPri = JobPriority.valueOf(request.getParameter("setJobPriority"));
      for (String job : jobs) {
        tracker.setJobPriority(JobID.forName(job), jobPri);
      }
    }
  }
}
Method used to process the request from the job page based on the request which it has received . For example like changing priority .
31,818
/**
 * Resolves the jobdetailshistory.jsp link for the given job id, or returns
 * the empty string when no history file URL is available.
 */
public static String getJobDetailsHistoryLink(JobTracker tracker, String jobId) {
  RetireJobInfo info = tracker.retireJobs.get(JobID.forName(jobId));
  String historyFileUrl = getHistoryFileUrl(info);
  if (historyFileUrl == null) {
    return "";
  }
  return "jobdetailshistory.jsp?jobid=" + jobId + "&logFile=" + historyFileUrl;
}
Given jobId resolve the link to jobdetailshistory . jsp
31,819
/**
 * Returns the URL-encoded history file path from the RetireJobInfo, or null
 * when the path is absent/empty or encoding fails (the failure is logged).
 *
 * Fix: encode the already-fetched local {@code historyFile} instead of
 * calling {@code info.getHistoryFile()} a second time, so the null/empty
 * check and the encoded value are guaranteed to refer to the same string.
 */
private static String getHistoryFileUrl(RetireJobInfo info) {
  String historyFile = info.getHistoryFile();
  String historyFileUrl = null;
  if (historyFile != null && !historyFile.equals("")) {
    try {
      historyFileUrl = URLEncoder.encode(historyFile, "UTF-8");
    } catch (UnsupportedEncodingException e) {
      LOG.warn("Can't create history url ", e);
    }
  }
  return historyFileUrl;
}
Obtain history file URL from RetireJobInfo
31,820
/**
 * Generates an HTML table of cluster CPU and memory utilization from the
 * tracker's resource reporter, or returns "" when no reporter is available.
 * The CPU usage percentage is capped at 100; NOTE(review): the memory
 * percentage is not capped the same way — confirm whether that asymmetry is
 * intentional.
 */
public static String generateClusterResTable ( JobTracker tracker ) throws IOException { ResourceReporter reporter = tracker . getResourceReporter ( ) ; if ( reporter == null ) { return "" ; } StringBuffer sb = new StringBuffer ( ) ; sb . append ( "<table border=\"1\" cellpadding=\"5\" cellspacing=\"0\">\n" ) ; sb . append ( "<tr>\n" ) ; sb . append ( "<th colspan=3>CPU</th>\n" ) ; sb . append ( "<th colspan=3>MEM</th>\n" ) ; sb . append ( "<th rowspan=2>Reported</th>\n" ) ; sb . append ( "</tr>\n" ) ; sb . append ( "<tr>\n" ) ; sb . append ( "<th>Total</th><th>Used</th><th>%</th>\n" ) ; sb . append ( "<th>Total</th><th>Used</th><th>%</th>\n" ) ; sb . append ( "</tr>\n" ) ; sb . append ( "<tr>\n" ) ; sb . append ( String . format ( "<td>%.1f GHz</td><td>%.1f GHz</td><td>%.1f%%</td>\n" , reporter . getClusterCpuTotalGHz ( ) , reporter . getClusterCpuUsageGHz ( ) , Math . min ( reporter . getClusterCpuUsageGHz ( ) / reporter . getClusterCpuTotalGHz ( ) * 100D , 100D ) ) ) ; sb . append ( String . format ( "<td>%.1f GB</td><td>%.1f GB</td><td>%.1f%%</td><td>%d</td>\n" , reporter . getClusterMemTotalGB ( ) , reporter . getClusterMemUsageGB ( ) , reporter . getClusterMemUsageGB ( ) / reporter . getClusterMemTotalGB ( ) * 100D , reporter . getReportedTaskTrackers ( ) ) ) ; sb . append ( "</tr>\n" ) ; sb . append ( "</table>\n" ) ; return sb . toString ( ) ; }
Method used to generate the cluster resource utilization table
31,821
/**
 * Opens the HTTP connection for the image and validates the response before
 * exposing the stream: requires HTTP 200, a positive Content-Length header
 * (stored as advertisedSize), and an MD5 digest header. Sets initialized
 * only after the input stream is obtained.
 */
private void setupInputStream ( ) throws IOException { HttpURLConnection connection = ( HttpURLConnection ) url . openConnection ( ) ; connection . setConnectTimeout ( httpTimeout ) ; connection . setReadTimeout ( httpTimeout ) ; if ( connection . getResponseCode ( ) != HttpURLConnection . HTTP_OK ) { throw new IOException ( "Fetch of " + url + " failed with status code " + connection . getResponseCode ( ) + "\nResponse message:\n" + connection . getResponseMessage ( ) ) ; } String contentLength = connection . getHeaderField ( TransferFsImage . CONTENT_LENGTH ) ; if ( contentLength != null ) { advertisedSize = Long . parseLong ( contentLength ) ; if ( advertisedSize <= 0 ) { throw new IOException ( "Invalid " + TransferFsImage . CONTENT_LENGTH + " header: " + contentLength ) ; } } else { throw new IOException ( TransferFsImage . CONTENT_LENGTH + " header is not provided " + "by the server when trying to fetch " + url ) ; } digest = TransferFsImage . parseMD5Header ( connection ) ; if ( digest == null ) { throw new IOException ( "Image digest not provided for url: " + url ) ; } inputStream = connection . getInputStream ( ) ; initialized = true ; }
Get input stream for the image through http connection .
31,822
/** Gets and lazily caches the CPU -> resource-type partitioning for this object. */
public Map < Integer , Map < ResourceType , Integer > > getCpuToResourcePartitioning ( ) { if ( cachedCpuToResourcePartitioning == null ) { cachedCpuToResourcePartitioning = getUncachedCpuToResourcePartitioning ( this ) ; } return cachedCpuToResourcePartitioning ; }
Get and cache the cpu to resource partitioning for this object .
31,823
/**
 * Resolves the pool info: an explicit "group.pool" pair when the configured
 * name splits into exactly two parts, a bare non-empty pool name placed in
 * the default pool group (legacy single-name behavior), or the default pool
 * info otherwise. (String.split never returns null, so the null check on
 * the split result is purely defensive.)
 */
public PoolInfo getPoolInfo ( ) { String poolNameProperty = get ( IMPLICIT_POOL_PROPERTY , "user.name" ) ; String explicitPool = get ( EXPLICIT_POOL_PROPERTY , get ( poolNameProperty , "" ) ) . trim ( ) ; String [ ] poolInfoSplitString = explicitPool . split ( "[.]" ) ; if ( poolInfoSplitString != null && poolInfoSplitString . length == 2 ) { return new PoolInfo ( poolInfoSplitString [ 0 ] , poolInfoSplitString [ 1 ] ) ; } else if ( ! explicitPool . isEmpty ( ) ) { return new PoolInfo ( PoolGroupManager . DEFAULT_POOL_GROUP , explicitPool ) ; } else { return PoolGroupManager . DEFAULT_POOL_INFO ; } }
Get the pool info . In order to support previous behavior a single pool name is accepted .
31,824
/**
 * Reads the metadata block when positioned at its start (rawBlockOffset ==
 * 0). Returns false once EOF has been reached (including an EOFException
 * while reading metadata, which latches eofReached), true otherwise.
 */
public boolean readMetaDataIfNeeded ( ) throws IOException { if ( eofReached ) { return false ; } if ( rawBlockOffset == 0 ) { try { metaDataConsumer . readMetaData ( in , metaDataBlockSize ) ; rawBlockOffset += metaDataBlockSize ; } catch ( EOFException e ) { eofReached = true ; return false ; } } return true ; }
Reads the metadata block if positioned at its start; returns false if EOF has been reached, true otherwise.
31,825
/**
 * Advances the stream by the given byte count: uses the seekable stream's
 * seek/seekToNewSource when available, otherwise skips on the raw stream in
 * a loop (throwing EOFException if skip makes no progress).
 * NOTE(review): when seek throws an IOException whose message is not
 * "Cannot seek after EOF" (or bytes &lt;= available), the exception is
 * swallowed and true is returned — confirm this is intended.
 */
private boolean seekOrSkip ( long bytes , boolean toNewSource ) throws IOException { if ( seekableIn != null ) { int available = in . available ( ) ; try { if ( toNewSource ) { return seekableIn . seekToNewSource ( seekableIn . getPos ( ) + bytes ) ; } else { seekableIn . seek ( seekableIn . getPos ( ) + bytes ) ; return true ; } } catch ( IOException e ) { if ( bytes > available && "Cannot seek after EOF" . equals ( e . getMessage ( ) ) ) { eofReached = true ; throw new EOFException ( e . getMessage ( ) ) ; } } } else { long toSkip = bytes ; while ( toSkip > 0 ) { long skipped = in . skip ( toSkip ) ; if ( skipped <= 0 ) { throw new EOFException ( "skip returned " + skipped ) ; } toSkip -= skipped ; } ; } return true ; }
This function depends on the underlying stream: it uses the stream's seek support when available and falls back to skipping bytes otherwise.
31,826
/**
 * Skips bytes on the raw InputStream via seekOrSkip, advances the raw
 * offset, and rejects skips that would land in the middle of a metadata
 * block.
 */
protected boolean rawSkip ( long bytes , boolean toNewSource ) throws IOException { boolean result = seekOrSkip ( bytes , toNewSource ) ; setRawOffset ( getRawOffset ( ) + bytes ) ; if ( rawBlockOffset > 0 && rawBlockOffset < metaDataBlockSize ) { throw new IOException ( "Cannot jump into the middle of a MetaDataBlock. MetaDataBlockSize = " + metaDataBlockSize + " and we are at " + rawBlockOffset ) ; } return result ; }
Skip some bytes from the raw InputStream .
31,827
/**
 * Initializes FailMon: exits unless running on Linux, configures log4j,
 * restores persisted parsing state, loads the properties file named by
 * fname, then probes for passwordless sudo (running "sudo -S -p ... echo")
 * and sets the superuser flag from the echoed marker. The busy-wait loop
 * polls the child's stdout/stderr until either the expected echo text or
 * the password prompt is available.
 */
public static void prepare ( String fname ) { if ( ! "Linux" . equalsIgnoreCase ( System . getProperty ( "os.name" ) ) ) { System . err . println ( "Linux system required for FailMon. Exiting..." ) ; System . exit ( 0 ) ; } System . setProperty ( "log4j.configuration" , "conf/log4j.properties" ) ; PropertyConfigurator . configure ( "conf/log4j.properties" ) ; LOG = LogFactory . getLog ( "org.apache.hadoop.contrib.failmon" ) ; logInfo ( "********** FailMon started ***********" ) ; PersistentState . readState ( "conf/parsing.state" ) ; try { FileInputStream propFile = new FileInputStream ( fname ) ; fmProperties . load ( propFile ) ; propFile . close ( ) ; } catch ( FileNotFoundException e1 ) { e1 . printStackTrace ( ) ; } catch ( IOException e ) { e . printStackTrace ( ) ; } ready = true ; try { String sudo_prompt = "passwd_needed:" ; String echo_txt = "access_ok" ; Process p = Runtime . getRuntime ( ) . exec ( "sudo -S -p " + sudo_prompt + " echo " + echo_txt ) ; InputStream inps = p . getInputStream ( ) ; InputStream errs = p . getErrorStream ( ) ; while ( inps . available ( ) < echo_txt . length ( ) && errs . available ( ) < sudo_prompt . length ( ) ) Thread . sleep ( 100 ) ; byte [ ] buf ; String s ; if ( inps . available ( ) >= echo_txt . length ( ) ) { buf = new byte [ inps . available ( ) ] ; inps . read ( buf ) ; s = new String ( buf ) ; if ( s . startsWith ( echo_txt ) ) { superuser = true ; logInfo ( "Superuser privileges found!" ) ; } else { superuser = false ; logInfo ( "Superuser privileges not found." ) ; } } } catch ( IOException e ) { e . printStackTrace ( ) ; } catch ( InterruptedException e ) { e . printStackTrace ( ) ; } }
Initializes structures needed by other methods . Also determines whether the executing user has superuser privileges .
31,828
/**
 * Returns the executor wake-up interval: the GCD of all monitor intervals,
 * floored at MIN_INTERVAL. Note: reading the "executor.interval.min"
 * property mutates the static MIN_INTERVAL as a side effect.
 */
public static int getInterval ( ArrayList < MonitorJob > monitors ) { String tmp = getProperty ( "executor.interval.min" ) ; if ( tmp != null ) MIN_INTERVAL = Integer . parseInt ( tmp ) ; int [ ] monIntervals = new int [ monitors . size ( ) ] ; for ( int i = 0 ; i < monitors . size ( ) ; i ++ ) monIntervals [ i ] = monitors . get ( i ) . interval ; return Math . max ( MIN_INTERVAL , gcd ( monIntervals ) ) ; }
Determines the minimum interval at which the executor thread needs to wake up to execute jobs. Essentially this interval equals the GCD of the intervals of all scheduled jobs.
31,829
/**
 * Checks whether a shell command is available on the system by running
 * "which" and testing whether it produced any output.
 *
 * @param cmd command name to look up
 * @return true if "which" printed a path for the command
 */
public static boolean checkExistence(String cmd) {
  return runCommand("which " + cmd).length() > 1;
}
Checks whether a specific shell command is available in the system .
31,830
/**
 * Runs a shell command and returns its standard output in a StringBuffer.
 * Errors are logged via printStackTrace and an empty/partial buffer is
 * returned (best-effort, as before).
 *
 * Fix: the original called waitFor() before reading the child's stdout,
 * which can deadlock once the output exceeds the OS pipe buffer. The output
 * is now drained to EOF first, then the process is awaited. Also
 * re-interrupts the thread on InterruptedException instead of only
 * printing it.
 *
 * @param cmd command and arguments to execute
 * @return buffer containing the command's stdout
 */
public static StringBuffer runCommand(String[] cmd) {
  StringBuffer retval = new StringBuffer(MAX_OUTPUT_LENGTH);
  try {
    Process p = Runtime.getRuntime().exec(cmd);
    InputStream out = p.getInputStream();
    // Drain stdout before waiting so the child can never block on a full pipe.
    int c;
    while ((c = out.read()) != -1) {
      retval.append((char) c);
    }
    p.waitFor();
  } catch (IOException e) {
    e.printStackTrace();
  } catch (InterruptedException e) {
    Thread.currentThread().interrupt();
    e.printStackTrace();
  }
  return retval;
}
Runs a shell command in the system and provides a StringBuffer with the output of the command .
31,831
/**
 * Creates a redirection URI for the file, served by a datanode chosen via
 * pickSrcDatanode.
 */
protected URI createUri ( FileStatus i , UnixUserGroupInformation ugi , ClientProtocol nnproxy , HttpServletRequest request ) throws IOException , URISyntaxException { return createUri ( i . getPath ( ) . toString ( ) , pickSrcDatanode ( i , nnproxy ) , ugi , request ) ; }
Create a redirection URI
31,832
/**
 * Picks candidate datanodes for the request; the first array element is the
 * chosen node and the rest are fallbacks. Empty files or files with no
 * located blocks get a single random node; otherwise the choice is
 * delegated to JspHelper.bestNode over the first located block range.
 */
private static DatanodeInfo [ ] pickSrcDatanode ( FileStatus i , ClientProtocol nnproxy ) throws IOException { if ( jspHelper == null ) jspHelper = new JspHelper ( ) ; final LocatedBlocks blks = nnproxy . getBlockLocations ( i . getPath ( ) . toUri ( ) . getPath ( ) , 0 , 1 ) ; if ( i . getLen ( ) == 0 || blks . getLocatedBlocks ( ) . size ( ) <= 0 ) { return new DatanodeInfo [ ] { jspHelper . randomNode ( ) } ; } return jspHelper . bestNode ( blks ) ; }
Select a datanode to service this request which is the first one in the returned array . The rest of the elements in the datanode are possible candidates if the first one fails . Currently this looks at no more than the first five blocks of a file selecting a datanode randomly from the most represented .
31,833
/**
 * Skips over one length-prefixed UTF8 value in the input: reads the
 * unsigned-short byte count and skips exactly that many bytes.
 */
public static void skip(DataInput in) throws IOException {
  WritableUtils.skipFully(in, in.readUnsignedShort());
}
Skips over one UTF8 in the input .
31,834
/**
 * Compares two UTF8 values lexicographically by their encoded bytes.
 *
 * @param o the other UTF8
 * @return negative, zero, or positive per the byte-wise comparison
 */
public int compareTo(Object o) {
  UTF8 other = (UTF8) o;
  return WritableComparator.compareBytes(
      bytes, 0, length,
      other.bytes, 0, other.length);
}
Compare two UTF8s .
31,835
/**
 * Converts a string to its UTF-8 encoded byte array (using the class's own
 * encoder).
 *
 * @param string string to encode
 * @return freshly allocated encoded bytes
 */
public static byte[] getBytes(String string) {
  byte[] encoded = new byte[utf8Length(string)];
  try {
    writeChars(encoded, string, 0, string.length());
  } catch (IOException e) {
    // Writing into an in-memory, correctly-sized buffer should never fail.
    throw new RuntimeException(e);
  }
  return encoded;
}
Convert a string to a UTF - 8 encoded byte array .
31,836
/**
 * Reads a length-prefixed UTF-8 encoded string: an unsigned-short byte
 * count followed by that many encoded bytes.
 */
public static String readString(DataInput in) throws IOException {
  return readChars(in, in.readUnsignedShort());
}
Read a UTF - 8 encoded string .
31,837
/**
 * Writes a UTF-8 encoded, length-prefixed string. Strings longer than
 * 0xffff/3 chars are truncated (with a warning) so the worst-case 3-bytes-
 * per-char encoding is guaranteed to fit the unsigned-short length prefix;
 * the post-encoding length check is a safety net.
 *
 * @return the number of encoded bytes written (excluding the length prefix)
 */
public static int writeString ( DataOutput out , String s ) throws IOException { if ( s . length ( ) > 0xffff / 3 ) { LOG . warn ( "truncating long string: " + s . length ( ) + " chars, starting with " + s . substring ( 0 , 20 ) ) ; s = s . substring ( 0 , 0xffff / 3 ) ; } int len = utf8Length ( s ) ; if ( len > 0xffff ) throw new IOException ( "string too long!" ) ; out . writeShort ( len ) ; writeChars ( out , s , 0 , s . length ( ) ) ; return len ; }
Write a UTF - 8 encoded string .
31,838
/**
 * Returns the number of bytes needed to UTF-8 encode the string: 1 byte for
 * chars up to 0x7F, 2 up to 0x7FF, 3 otherwise (each UTF-16 code unit is
 * encoded independently).
 */
private static int utf8Length(String string) {
  int total = 0;
  for (int i = 0, n = string.length(); i < n; i++) {
    char c = string.charAt(i);
    if (c <= 0x007F) {
      total += 1;
    } else if (c <= 0x07FF) {
      total += 2;
    } else {
      total += 3;
    }
  }
  return total;
}
Returns the number of bytes required to write this .
31,839
/**
 * Creates an empty edits log file: truncates to zero, writes the layout
 * version, and flushes it to disk.
 */
public void create ( ) throws IOException { fc . truncate ( 0 ) ; fc . position ( 0 ) ; doubleBuf . getCurrentBuf ( ) . writeInt ( FSConstants . LAYOUT_VERSION ) ; setReadyToFlush ( ) ; flush ( ) ; }
Create empty edits logs file .
31,840
/**
 * Terminates the current buffer with an OP_INVALID marker and swaps it to
 * the ready-to-flush position; new records may still be written while the
 * ready buffer is being flushed.
 */
public void setReadyToFlush ( ) throws IOException { doubleBuf . getCurrentBuf ( ) . write ( FSEditLogOpCodes . OP_INVALID . getOpCode ( ) ) ; doubleBuf . setReadyToFlush ( ) ; }
All data that has been written to the stream so far will be flushed . New data can be still written to the stream while flushing is performed .
31,841
/**
 * Flushes the ready buffer to the file (optionally forcing to disk when
 * durable). The file position is then moved back one byte — presumably so
 * the trailing OP_INVALID marker written by setReadyToFlush is overwritten
 * by the next flush; TODO(review) confirm.
 */
protected void flushAndSync ( boolean durable ) throws IOException { if ( fp == null ) { throw new IOException ( "Trying to use aborted output stream" ) ; } preallocate ( ) ; if ( doubleBuf . isFlushed ( ) ) { return ; } doubleBuf . flushTo ( fp ) ; if ( durable ) { fc . force ( false ) ; } fc . position ( fc . position ( ) - 1 ) ; }
Flush ready buffer to persistent store . currentBuffer is not flushed as it accumulates new log records while readyBuffer will be flushed and synced .
31,842
/**
 * Preallocates edit-log space: when the current position is within
 * max(preallocateSize/100, 4096) bytes of the file end, writes the fill
 * buffer at the current position to extend the file.
 */
private void preallocate ( ) throws IOException { long position = fc . position ( ) ; long triggerSize = Math . max ( FSEditLog . preallocateSize / 100 , 4096 ) ; if ( position + triggerSize >= fc . size ( ) ) { if ( FSNamesystem . LOG . isDebugEnabled ( ) ) { FSNamesystem . LOG . debug ( "Preallocating Edit log, current size " + fc . size ( ) ) ; } fill . position ( 0 ) ; int written = fc . write ( fill , position ) ; if ( FSNamesystem . LOG . isDebugEnabled ( ) ) { FSNamesystem . LOG . debug ( "Edit log size is now " + fc . size ( ) + " written " + written + " bytes " + " at offset " + position ) ; } } }
allocate a big chunk of data
31,843
/**
 * Counts, per datanode, how many block replicas of files directly under dir
 * live on it, examining at most threshold blocks. Children are shuffled so
 * the sampled blocks vary between calls, and the whole traversal runs under
 * the namesystem read lock. Returns early (with the counts so far) once the
 * block budget is exhausted, or an empty map if dir has no children.
 */
private HashMap < DatanodeDescriptor , Integer > directoryDataNodeUsage ( INodeDirectory dir , int threshold ) { HashMap < DatanodeDescriptor , Integer > dataNodeUsage = new HashMap < DatanodeDescriptor , Integer > ( ) ; List < INode > children ; nameSystem . readLock ( ) ; try { if ( dir . getChildrenRaw ( ) == null ) { return dataNodeUsage ; } children = new ArrayList < INode > ( dir . getChildrenRaw ( ) ) ; Collections . shuffle ( children ) ; for ( INode node : children ) { if ( ! ( node instanceof INodeFile ) ) { continue ; } INodeFile file = ( INodeFile ) node ; BlockInfo [ ] blocks = file . getBlocks ( ) ; for ( BlockInfo block : blocks ) { if ( threshold == 0 ) { return dataNodeUsage ; } int replication = block . numNodes ( ) ; for ( int i = 0 ; i < replication ; i ++ ) { DatanodeDescriptor datanode = block . getDatanode ( i ) ; Integer currentUsage = dataNodeUsage . get ( datanode ) ; dataNodeUsage . put ( datanode , currentUsage == null ? 1 : currentUsage + 1 ) ; } threshold -- ; } } } finally { nameSystem . readUnlock ( ) ; } return dataNodeUsage ; }
Iterates through files in the directory dir and counts the number of usages of datanodes for the files in the directory dir . Just iterates through threshold number of blocks .
31,844
/**
 * Validates that every source path exists, collecting all failures
 * (missing paths and IO errors) before reporting them together.
 *
 * @throws InvalidInputException wrapping every problem found
 */
protected static void checkSource(Configuration conf, List<Path> srcs)
    throws InvalidInputException {
  List<IOException> problems = new ArrayList<IOException>();
  for (Path src : srcs) {
    try {
      if (!src.getFileSystem(conf).exists(src)) {
        problems.add(new FileNotFoundException("Source " + src + " does not exist."));
      }
    } catch (IOException e) {
      problems.add(e);
    }
  }
  if (!problems.isEmpty()) {
    throw new InvalidInputException(problems);
  }
}
Sanity check for source
31,845
/**
 * Establishes the RPC proxy to the utilization Collector (with a fixed
 * "hadoop" UGI). On failure the error is logged and the method returns,
 * leaving rpcCollector unset so a later call can retry.
 */
protected void connect ( ) { LOG . info ( "Connecting to collector..." ) ; try { conf . setStrings ( UnixUserGroupInformation . UGI_PROPERTY_NAME , new String [ ] { "hadoop" , "hadoop" } ) ; rpcCollector = ( UtilizationCollectorProtocol ) RPC . getProxy ( UtilizationCollectorProtocol . class , UtilizationCollectorProtocol . versionID , UtilizationCollector . getAddress ( conf ) , conf ) ; } catch ( IOException e ) { LOG . error ( "Cannot connect to UtilizationCollector server. Retry in " + DEFAULT_MIRROR_PERIOD + " milliseconds." ) ; return ; } LOG . info ( "Connection established" ) ; }
Make connection to the Collector
31,846
/**
 * Mirrors cluster, job, and tasktracker utilization from the Collector,
 * first (re)connecting after a delay if the proxy is unset. On any fetch
 * error the local caches are cleared and a warning is logged (best-effort;
 * the broad catch is deliberate).
 */
protected void fetchData ( ) throws IOException { if ( rpcCollector == null ) { try { Thread . sleep ( RECONNECT_PERIOD ) ; } catch ( InterruptedException e ) { } connect ( ) ; } try { clusterUtil = rpcCollector . getClusterUtilization ( ) ; for ( JobUtilization job : rpcCollector . getAllRunningJobUtilization ( ) ) { allJobUtil . put ( job . getJobId ( ) , job ) ; } for ( TaskTrackerUtilization tt : rpcCollector . getAllTaskTrackerUtilization ( ) ) { allTaskTrackerUtil . put ( tt . getHostName ( ) , tt ) ; } } catch ( Exception e ) { clusterUtil = null ; allJobUtil . clear ( ) ; allTaskTrackerUtil . clear ( ) ; LOG . warn ( "Error obtaining data from Collector." ) ; } }
Mirror data from the Collector
31,847
/**
 * Returns the configured capacity percentage for the queue, or -1 when the
 * property is unset. Values outside [0, 100] are rejected.
 *
 * @throws IllegalArgumentException if the configured value is out of range
 */
public float getCapacity ( String queue ) { String raw = rmConf . getRaw ( toFullPropertyName ( queue , CAPACITY_PROPERTY ) ) ; if ( raw == null ) { return - 1 ; } float result = rmConf . getFloat ( toFullPropertyName ( queue , CAPACITY_PROPERTY ) , - 1 ) ; if ( result < 0.0 || result > 100.0 ) { throw new IllegalArgumentException ( "Illegal capacity for queue " + queue + " of " + result ) ; } return result ; }
Get the percentage of the cluster for the specified queue .
31,848
/** Sets the capacity percentage of the given queue in the configuration. */
public void setCapacity ( String queue , float capacity ) { rmConf . setFloat ( toFullPropertyName ( queue , CAPACITY_PROPERTY ) , capacity ) ; }
Sets the capacity of the given queue .
31,849
/**
 * Returns the maximum capacity percentage for the queue, normalizing
 * non-positive values to -1 (meaning no limit). The value must not exceed
 * 100 and, when set, must be at least the queue's configured capacity.
 *
 * @throws IllegalArgumentException on out-of-range or inconsistent values
 */
public float getMaxCapacity ( String queue ) { float result = rmConf . getFloat ( toFullPropertyName ( queue , MAX_CAPACITY_PROPERTY ) , - 1 ) ; result = ( result <= 0 ) ? - 1 : result ; if ( result > 100.0 ) { throw new IllegalArgumentException ( "Illegal " + MAX_CAPACITY_PROPERTY + " for queue " + queue + " of " + result ) ; } if ( ( result != - 1 ) && ( result < getCapacity ( queue ) ) ) { throw new IllegalArgumentException ( MAX_CAPACITY_PROPERTY + " " + result + " for a queue should be greater than or equal to capacity " ) ; } return result ; }
Return the maximum percentage of the cluster capacity that can be used by the given queue . This percentage defines a limit beyond which a queue cannot use the capacity of cluster . This provides a means to limit how much excess capacity a queue can use . By default there is no limit .
31,850
/** Sets the maximum capacity percentage of the given queue in the configuration. */
public void setMaxCapacity ( String queue , float maxCapacity ) { rmConf . setFloat ( toFullPropertyName ( queue , MAX_CAPACITY_PROPERTY ) , maxCapacity ) ; }
Sets the maxCapacity of the given queue .
31,851
/**
 * Returns the per-user minimum resource limit (percent) for the queue,
 * falling back to the default. The value must lie in (0, 100].
 *
 * @throws IllegalArgumentException if the configured limit is out of range
 */
public int getMinimumUserLimitPercent ( String queue ) { int userLimit = rmConf . getInt ( toFullPropertyName ( queue , "minimum-user-limit-percent" ) , defaultUlimitMinimum ) ; if ( userLimit <= 0 || userLimit > 100 ) { throw new IllegalArgumentException ( "Invalid user limit : " + userLimit + " for queue : " + queue ) ; } return userLimit ; }
Get the minimum limit of resources for any user submitting jobs in this queue in percentage .
31,852
/**
 * Returns the maximum number of initialized jobs per user for the queue,
 * falling back to the default; the value must be positive.
 *
 * @throws IllegalArgumentException if the configured value is not positive
 */
public int getMaxJobsPerUserToInitialize ( String queue ) { int maxJobsPerUser = rmConf . getInt ( toFullPropertyName ( queue , "maximum-initialized-jobs-per-user" ) , defaultMaxJobsPerUsersToInitialize ) ; if ( maxJobsPerUser <= 0 ) { throw new IllegalArgumentException ( "Invalid maximum jobs per user configuration " + maxJobsPerUser ) ; } return maxJobsPerUser ; }
Gets the maximum number of jobs which are allowed to initialize in the job queue .
31,853
/**
 * Adds this datanode to the block and inserts the block at the head of this
 * node's block list. Returns false when the block already lists this node.
 */
boolean addBlock ( BlockInfo b ) { int dnIndex = b . addNode ( this ) ; if ( dnIndex < 0 ) return false ; blockList = b . listInsert ( blockList , this , dnIndex ) ; numOfBlocks ++ ; return true ; }
Add data - node to the block . Add block to the head of the list of blocks belonging to the data - node .
31,854
/**
 * Splices an already-linked block list (head..tail, count blocks, with this
 * descriptor already inserted into each block) onto the front of this
 * descriptor's block list. Used for parallel initial block reports.
 */
void insertIntoList ( BlockInfo head , int headIndex , BlockInfo tail , int tailIndex , int count ) { if ( head == null ) return ; tail . setNext ( tailIndex , blockList ) ; if ( blockList != null ) blockList . setPrevious ( blockList . findDatanode ( this ) , tail ) ; blockList = head ; blockList . setPrevious ( headIndex , null ) ; numOfBlocks += count ; }
Adds blocks already connected into list to this descriptor s blocks . The blocks in the input list already have this descriptor inserted to them . Used for parallel initial block reports .
31,855
/**
 * Unlinks the block from this node's block list and removes this node from
 * the block. Returns whether the node was actually removed from the block.
 */
boolean removeBlock ( BlockInfo b ) { blockList = b . listRemove ( blockList , this ) ; if ( b . removeNode ( this ) ) { numOfBlocks -- ; return true ; } else { return false ; } }
Remove block from the list of blocks belonging to the data - node . Remove data - node from the block .
31,856
/** Moves the block to the head of this datanode's block list (remove + re-insert). */
void moveBlockToHead ( BlockInfo b ) { blockList = b . listRemove ( blockList , this ) ; blockList = b . listInsert ( blockList , this , - 1 ) ; }
Move block to the head of the list of blocks belonging to the data - node .
31,857
/**
 * Unlinks the block from the per-datanode list and re-inserts it at the
 * head, updating the cached head/current datanode indexes in the process.
 * Returns the new head (the block itself, unless it already was the head).
 */
protected BlockInfo listMoveToHead ( BlockInfo block , BlockInfo head , DatanodeIndex indexes ) { assert head != null : "Head can not be null" ; if ( head == block ) { return head ; } BlockInfo next = block . getSetNext ( indexes . currentIndex , head ) ; BlockInfo prev = block . getSetPrevious ( indexes . currentIndex , null ) ; head . setPrevious ( indexes . headIndex , block ) ; indexes . headIndex = indexes . currentIndex ; prev . setNext ( prev . findDatanode ( this ) , next ) ; if ( next != null ) next . setPrevious ( next . findDatanode ( this ) , prev ) ; return block ; }
Remove block from the list and insert into the head of the list of blocks related to the specified DatanodeDescriptor . If the head is null then form a new list .
31,858
/** Queues block replication work (block plus non-empty target list) for this datanode. */
void addBlockToBeReplicated ( Block block , DatanodeDescriptor [ ] targets ) { assert ( block != null && targets != null && targets . length > 0 ) ; replicateBlocks . offer ( block , targets ) ; }
Store block replication work .
31,859
/** Queues block recovery work (block plus non-empty target list) for this datanode. */
void addBlockToBeRecovered ( Block block , DatanodeDescriptor [ ] targets ) { assert ( block != null && targets != null && targets . length > 0 ) ; recoverBlocks . offer ( block , targets ) ; }
Store block recovery work .
31,860
/**
 * Queues the given non-empty list of blocks for invalidation on this
 * datanode, synchronizing on the invalidate set while adding.
 */
void addBlocksToBeInvalidated ( List < Block > blocklist ) { assert ( blocklist != null && blocklist . size ( ) > 0 ) ; synchronized ( invalidateBlocks ) { for ( Block blk : blocklist ) { invalidateBlocks . add ( blk ) ; } } }
Store block invalidation work .
31,861
/**
 * Polls up to maxblocks blocks from the invalidation set and wraps them in
 * a DNA_INVALIDATE command; returns null when there is nothing to
 * invalidate.
 */
BlockCommand getInvalidateBlocks ( int maxblocks ) { Block [ ] deleteList = null ; synchronized ( invalidateBlocks ) { deleteList = invalidateBlocks . pollToArray ( new Block [ Math . min ( invalidateBlocks . size ( ) , maxblocks ) ] ) ; } return ( deleteList == null || deleteList . length == 0 ) ? null : new BlockCommand ( DatanodeProtocol . DNA_INVALIDATE , deleteList ) ; }
Remove the specified number of blocks to be invalidated
31,862
/**
 * Deserializes datanode fields in the legacy FSEditLog format; the field
 * order here must match the corresponding serialization. infoPort is masked
 * to an unsigned 16-bit value.
 */
void readFieldsFromFSEditLog ( DataInput in ) throws IOException { this . name = UTF8 . readString ( in ) ; this . storageID = UTF8 . readString ( in ) ; this . infoPort = in . readShort ( ) & 0x0000ffff ; this . capacity = in . readLong ( ) ; this . dfsUsed = in . readLong ( ) ; this . remaining = in . readLong ( ) ; this . lastUpdate = in . readLong ( ) ; this . xceiverCount = in . readInt ( ) ; this . location = Text . readString ( in ) ; this . hostName = Text . readString ( in ) ; setAdminState ( WritableUtils . readEnum ( in , AdminStates . class ) ) ; }
Serialization for FSEditLog
31,863
/**
 * Roll the approximate blocks-scheduled counters: the current count becomes
 * the previous one and the current count restarts from zero. Rolls at most
 * once per BLOCKS_SCHEDULED_ROLL_INTERVAL.
 */
private void rollBlocksScheduled(long now) {
    // Nothing to do until the roll interval has elapsed.
    if (now - lastBlocksScheduledRollTime <= BLOCKS_SCHEDULED_ROLL_INTERVAL) {
        return;
    }
    prevApproxBlocksScheduled = currApproxBlocksScheduled;
    currApproxBlocksScheduled = 0;
    lastBlocksScheduledRollTime = now;
}
Adjusts curr and prev number of blocks scheduled every few minutes .
31,864
/**
 * Build a summary of all recorded problems: the messages of the underlying
 * exceptions, one per line.
 *
 * @return newline-separated messages of the collected IOExceptions
 */
public String getMessage() {
    // StringBuilder instead of StringBuffer: this is single-threaded local
    // string assembly, so the synchronized StringBuffer is unnecessary.
    StringBuilder result = new StringBuilder();
    Iterator<IOException> itr = problems.iterator();
    while (itr.hasNext()) {
        result.append(itr.next().getMessage());
        if (itr.hasNext()) {
            result.append("\n");
        }
    }
    return result.toString();
}
Get a summary message of the problems found .
31,865
/**
 * Load and register a new image under the given name. If the image resource
 * does not exist or fails to load, the platform's "missing image"
 * placeholder descriptor is registered instead.
 *
 * @param name     key under which the descriptor and image are registered
 * @param filename resource file name, resolved relative to RESOURCE_DIR
 * @return true if the real resource loaded, false if the placeholder was used
 */
private boolean newImage(String name, String filename) {
    ImageDescriptor id;
    boolean success;
    try {
        URL fileURL = FileLocator.find(bundle, new Path(RESOURCE_DIR + filename), null);
        id = ImageDescriptor.createFromURL(FileLocator.toFileURL(fileURL));
        success = true;
    } catch (Exception e) {
        e.printStackTrace();
        id = ImageDescriptor.getMissingImageDescriptor();
        success = false;
    }
    // Register both the descriptor and an eagerly created image.
    descMap.put(name, id);
    // true: createImage returns the missing-image placeholder on failure
    // instead of throwing.
    imageMap.put(name, id.createImage(true));
    return success;
}
Load and register a new image . If the image resource does not exist or fails to load a default error resource is supplied .
31,866
/**
 * Create a QuorumException with a descriptive message detailing the
 * underlying exceptions as well as any successful responses.
 *
 * @param simpleMsg  one-line summary prepended to the detail
 * @param successes  map of responder key to successful response (may be empty)
 * @param exceptions map of responder key to thrown exception; must be non-empty
 * @return the assembled QuorumException
 */
public static <K, V> QuorumException create(String simpleMsg,
        Map<K, V> successes, Map<K, Throwable> exceptions) {
    Preconditions.checkArgument(!exceptions.isEmpty(), "Must pass exceptions");
    StringBuilder msg = new StringBuilder();
    msg.append(simpleMsg).append(". ");
    if (!successes.isEmpty()) {
        msg.append(successes.size()).append(" successful responses:\n");
        Joiner.on("\n").useForNull("null [success]")
            .withKeyValueSeparator(": ").appendTo(msg, successes);
        msg.append("\n");
    }
    msg.append(exceptions.size() + " exceptions thrown:\n");
    boolean isFirst = true;
    for (Map.Entry<K, Throwable> e : exceptions.entrySet()) {
        if (!isFirst) {
            msg.append("\n");
        }
        isFirst = false;
        msg.append(e.getKey()).append(": ");
        // RuntimeExceptions get a full stack trace; checked exceptions use
        // their message when available, falling back to the stack trace.
        if (e.getValue() instanceof RuntimeException) {
            msg.append(StringUtils.stringifyException(e.getValue()));
        } else if (e.getValue().getLocalizedMessage() != null) {
            msg.append(e.getValue().getLocalizedMessage());
        } else {
            msg.append(StringUtils.stringifyException(e.getValue()));
        }
    }
    return new QuorumException(msg.toString());
}
Create a QuorumException instance with a descriptive message detailing the underlying exceptions as well as any successful responses which were returned .
31,867
/**
 * Store the item in the configuration under the given key, serialized via
 * DefaultStringifier.
 *
 * @param conf    configuration to write to
 * @param item    item to serialize; its runtime class selects the serializer
 * @param keyName configuration key to store under
 * @throws IOException if serialization fails
 */
public static <K> void store(Configuration conf, K item, String keyName)
        throws IOException {
    DefaultStringifier<K> stringifier =
        new DefaultStringifier<K>(conf, GenericsUtil.getClass(item));
    try {
        conf.set(keyName, stringifier.toString(item));
    } finally {
        // Close in finally, consistent with load()/storeArray(): the
        // stringifier must be released even if serialization throws.
        stringifier.close();
    }
}
Stores the item in the configuration with the given keyName .
31,868
/**
 * Restore an object stored under the given key from the configuration.
 *
 * NOTE(review): if keyName is absent, conf.get() returns null and
 * fromString will likely fail — confirm callers always store before loading.
 *
 * @param conf      configuration to read from
 * @param keyName   configuration key the object was stored under
 * @param itemClass class of the stored object
 * @return the deserialized object
 * @throws IOException if deserialization fails
 */
public static <K> K load(Configuration conf, String keyName, Class<K> itemClass)
        throws IOException {
    DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, itemClass);
    try {
        String itemStr = conf.get(keyName);
        return stringifier.fromString(itemStr);
    } finally {
        stringifier.close();
    }
}
Restores the object from the configuration .
31,869
/**
 * Store an array of items in the configuration under the given key, each
 * element serialized and joined with SEPARATOR.
 *
 * The element class is taken from the first item, so the array must be
 * non-empty (items[0] throws otherwise).
 *
 * @param conf    configuration to write to
 * @param items   non-empty array of items to serialize
 * @param keyName configuration key to store under
 * @throws IOException if serialization fails
 */
public static <K> void storeArray(Configuration conf, K[] items, String keyName)
        throws IOException {
    DefaultStringifier<K> stringifier =
        new DefaultStringifier<K>(conf, GenericsUtil.getClass(items[0]));
    try {
        StringBuilder builder = new StringBuilder();
        for (K item : items) {
            // Every element, including the last, is followed by SEPARATOR;
            // loadArray() skips the resulting empty trailing segment.
            builder.append(stringifier.toString(item)).append(SEPARATOR);
        }
        conf.set(keyName, builder.toString());
    } finally {
        stringifier.close();
    }
}
Stores the array of items in the configuration with the given keyName .
31,870
/**
 * Restore an array of objects stored under the given key.
 *
 * NOTE(review): if keyName is absent, conf.get() returns null and
 * itemStr.split() throws NPE — confirm callers always store before loading.
 *
 * @param conf      configuration to read from
 * @param keyName   configuration key the array was stored under
 * @param itemClass element class of the stored array
 * @return the deserialized array
 * @throws IOException if deserialization fails
 */
public static <K> K[] loadArray(Configuration conf, String keyName, Class<K> itemClass)
        throws IOException {
    DefaultStringifier<K> stringifier = new DefaultStringifier<K>(conf, itemClass);
    try {
        String itemStr = conf.get(keyName);
        ArrayList<K> list = new ArrayList<K>();
        String[] parts = itemStr.split(SEPARATOR);
        for (String part : parts) {
            // Skip empty segments produced by the trailing SEPARATOR that
            // storeArray() appends after every element.
            if (!part.equals(""))
                list.add(stringifier.fromString(part));
        }
        return GenericsUtil.toArray(itemClass, list);
    } finally {
        stringifier.close();
    }
}
Restores the array of objects from the configuration .
31,871
/**
 * Replication factor to use for the given block. Source blocks use the
 * file's own replication; parity blocks of a raid-stored file use the
 * codec's parity replication, which may differ.
 *
 * @throws IllegalStateException if the block is not a source block but the
 *         file is not raid storage (a parity block cannot belong to it)
 */
public short getBlockReplication(BlockInfo block) {
    if (storage.isSourceBlock(block)) {
        return getReplication();
    } else {
        if (storage.getStorageType() == StorageType.RAID_STORAGE) {
            return ((INodeRaidStorage) storage).getCodec().parityReplication;
        } else {
            throw new IllegalStateException("parity block " + block
                + " belongs to a non-raid file");
        }
    }
}
Get the replication factor for the given block; a parity block of a raid-stored file may have a different replication factor than the file's source blocks.
31,872
/**
 * Process a registration from a datanode during failover preparation.
 * A newly seen node is recorded as live and a heartbeat is awaited from it;
 * later an incremental block report is expected as well.
 *
 * No-op when node is null or node tracking is not active.
 */
protected void reportRegister(DatanodeID node) {
    if (node != null && shouldUpdateNodes()) {
        if (!liveDatanodes.contains(node)) {
            outStandingHeartbeats.add(node);
            liveDatanodes.add(node);
        }
    }
}
Processes a registration from the datanode . First we await a heartbeat from it and later an incremental block report .
31,873
/**
 * Process a heartbeat from a datanode during failover.
 *
 * @return true if this node had an outstanding heartbeat — in that case it
 *         is moved to the outstanding-reports set and the caller should send
 *         a ClearPrimary command to it; false otherwise
 */
protected boolean reportHeartBeat(DatanodeID node) {
    if (node != null && shouldUpdateNodes()) {
        // Ensure the node is tracked even if we never saw its registration.
        reportRegister(node);
        synchronized (this) {
            if (outStandingHeartbeats.remove(node)) {
                outStandingReports.add(node);
                return true;
            }
        }
    }
    return false;
}
Processes a heartbeat from the datanode and determines whether we should send a ClearPrimary command to it .
31,874
/**
 * Record that the given datanode has cleared the primary: it is fully aware
 * of the failover and has sent its incremental block report, so it no longer
 * counts as outstanding.
 */
protected void reportPrimaryCleared(DatanodeID node) {
    if (node != null && shouldUpdateNodes()) {
        if (outStandingReports.remove(node)) {
            LOG.info("Failover: Outstanding reports: " + outStandingReports.size());
        }
    }
}
Report that the given datanode has cleared the primary . It is fully aware of the failover and it has sent the incremental block report .
31,875
/**
 * Trigger failover processing for safe mode and block until safe mode has
 * been left.
 *
 * All live datanodes are recorded with outstanding heartbeats, a
 * SafeModeMonitor daemon is started, and this thread joins it until the
 * monitor exits.
 *
 * @throws IOException if interrupted while waiting, or if the monitor exits
 *         without reaching the AFTER_FAILOVER state
 */
protected void triggerFailover() throws IOException {
    clearDataStructures();
    prepareFailover = false;
    // Every currently live datanode must heartbeat before failover completes.
    for (DatanodeInfo node : namesystem.datanodeReport(DatanodeReportType.LIVE)) {
        liveDatanodes.add(node);
        outStandingHeartbeats.add(node);
    }
    InjectionHandler.processEvent(InjectionEvent.STANDBY_ENTER_SAFE_MODE);
    safeModeState = SafeModeState.FAILOVER_IN_PROGRESS;
    InjectionHandler.processEvent(InjectionEvent.STANDBY_FAILOVER_INPROGRESS);
    safeModeMonitor = new Daemon(new SafeModeMonitor(namesystem, this));
    safeModeMonitor.start();
    try {
        // Block until the monitor decides safe mode can be exited.
        safeModeMonitor.join();
    } catch (InterruptedException ie) {
        // NOTE(review): message has typos ("triggerSafeMode", "interruped");
        // left unchanged here since it is a runtime string.
        throw new IOException("triggerSafeMode() interruped()");
    }
    if (safeModeState != SafeModeState.AFTER_FAILOVER) {
        throw new IOException("safeModeState is : " + safeModeState
            + " which does not indicate a successfull exit of safemode");
    }
}
Triggers failover processing for safe mode and blocks until we have left safe mode .
31,876
/**
 * Check whether enough datanode reports have been received, i.e. the report
 * ratio has reached the outstanding-report threshold.
 *
 * @param checkDatanodes if true and the threshold is not yet met, re-check
 *        datanode liveness first and then re-evaluate the ratio
 * @return true if the threshold is met; false on failure or any exception
 *         (exceptions are logged, not propagated)
 */
private synchronized boolean datanodeReportsReceived(boolean checkDatanodes) {
    try {
        boolean received =
            this.getDatanodeReportRatio() >= this.outStandingReportThreshold;
        if (!received && checkDatanodes) {
            checkDatanodes();
            return this.getDatanodeReportRatio() >= this.outStandingReportThreshold;
        }
        return received;
    } catch (Exception e) {
        LOG.warn("Failover - caught exception when checking reports", e);
        return false;
    }
}
Checks if the datanode reports have been received
31,877
/**
 * Update in-memory references to the primary and standby nodes. A null
 * argument clears both references. Override for ZooKeeper-specific logic
 * and call this on a successful ZooKeeper write.
 *
 * @param ofPrimary which instance is primary, or null to clear both
 * @throws IOException declared for overriding implementations
 */
public void setPrimary(InstanceId ofPrimary) throws IOException {
    if (ofPrimary == null) {
        primaryNode = null;
        standbyNode = null;
        return;
    }
    switch (ofPrimary) {
        case NODEZERO:
            primaryNode = getNodeZero();
            standbyNode = getNodeOne();
            // BUG FIX: the original had no break here, so NODEZERO fell
            // through into NODEONE and the assignments above were silently
            // overwritten — the primary always ended up as node one.
            break;
        case NODEONE:
            primaryNode = getNodeOne();
            standbyNode = getNodeZero();
            break;
    }
}
The purpose of this method is to update in memory references to primary and standby . Please override this method and do the zookeeper specific logic in the method and call this method on successful zookeeper write .
31,878
/**
 * Set the backing configuration source. Validates the configuration, stores
 * the source, and (re)creates the ZooKeeper client from it.
 */
public void setConfSource(Configurable src) {
    validateConfigFile(src.getConf());
    confSrc = src;
    zkClient = new AvatarZooKeeperClient(confSrc.getConf(), null, true);
}
Sets the backing configuration source
31,879
/**
 * Add an entry to the cache. {@code size} is the virtual size accounted
 * against the cache capacity.
 *
 * If the hdfsPath is already cached, only its generation stamp (access
 * time) is refreshed; size and local path are asserted unchanged. Adding
 * past the capacity triggers an eviction pass.
 *
 * @throws IOException if eviction fails
 */
void addCache(Path hdfsPath, Path localPath, long size) throws IOException {
    localMetrics.numAdd++;
    CacheEntry c = new CacheEntry(hdfsPath, localPath, size);
    // putIfAbsent: returns the existing entry if one is already mapped.
    CacheEntry found = cacheMap.putIfAbsent(hdfsPath, c);
    if (found != null) {
        // Re-add of an existing entry must not change its size or location.
        assert size == found.entrySize;
        assert localPath.equals(found.localPath);
        // Bump the LRU stamp so this entry counts as recently used.
        found.setGenstamp(globalStamp.incrementAndGet());
        localMetrics.numAddExisting++;
        if (LOG.isDebugEnabled()) {
            LOG.debug("LookasideCache updating path " + hdfsPath);
        }
    } else {
        cacheSize.addAndGet(size);
        localMetrics.numAddNew++;
        if (LOG.isDebugEnabled()) {
            LOG.debug("LookasideCache add new path:" + hdfsPath
                + " cachedPath:" + localPath + " size " + size);
        }
    }
    if (cacheSize.get() > cacheSizeMax) {
        checkEvict();
    }
}
Adds an entry into the cache . The size is the virtual size of this entry .
31,880
/**
 * Rename a cache entry from oldhdfsPath to newhdfsPath, updating its local
 * path. The size stays the same; the access stamp is refreshed.
 *
 * A missing source entry is logged and ignored; an already-existing
 * destination entry is an error.
 *
 * @throws IOException if newhdfsPath is already cached
 */
void renameCache(Path oldhdfsPath, Path newhdfsPath, Path localPath)
        throws IOException {
    CacheEntry found = cacheMap.remove(oldhdfsPath);
    if (found == null) {
        String msg = "LookasideCache error renaming path: " + oldhdfsPath
            + " to: " + newhdfsPath + " Path " + newhdfsPath
            + " because it does not exists in the cache.";
        LOG.warn(msg);
        return;
    }
    // Mutate the removed entry before re-inserting under the new key.
    found.hdfsPath = newhdfsPath;
    found.setGenstamp(globalStamp.incrementAndGet());
    found.localPath = localPath;
    CacheEntry empty = cacheMap.putIfAbsent(newhdfsPath, found);
    if (empty != null) {
        // Destination already present: the removed source entry is dropped.
        String msg = "LookasideCache error renaming path: " + oldhdfsPath
            + " to: " + newhdfsPath + " Path " + newhdfsPath
            + " already exists in the cache.";
        LOG.warn(msg);
        throw new IOException(msg);
    }
    localMetrics.numRename++;
    if (LOG.isDebugEnabled()) {
        LOG.debug("LookasideCache renamed path:" + oldhdfsPath
            + " to:" + newhdfsPath + " cachedPath: " + localPath);
    }
}
Change the localPath in the cache . The size remains the same . The accesstime is updated .
31,881
/**
 * Delete an entry from the cache and release its accounted size.
 * No-op if the path is not cached.
 */
void removeCache(Path hdfsPath) {
    CacheEntry c = cacheMap.remove(hdfsPath);
    if (c != null) {
        cacheSize.addAndGet(-c.entrySize);
        localMetrics.numRemove++;
        if (LOG.isDebugEnabled()) {
            LOG.debug("LookasideCache removed path:" + hdfsPath
                + " freed up size: " + c.entrySize);
        }
    }
}
Delete an entry from the cache .
31,882
/**
 * Evict an entry from the cache, releasing its accounted size and calling
 * back into the application (if an eviction interface is registered) so it
 * can reclaim the underlying local file. No-op if the path is not cached.
 *
 * @throws IOException if the eviction callback fails
 */
void evictCache(Path hdfsPath) throws IOException {
    CacheEntry c = cacheMap.remove(hdfsPath);
    if (c != null) {
        cacheSize.addAndGet(-c.entrySize);
        if (evictionIface != null) {
            evictionIface.evictCache(c.hdfsPath, c.localPath, c.entrySize);
        }
        localMetrics.numEvict++;
        if (LOG.isDebugEnabled()) {
            LOG.debug("LookasideCache removed path:" + hdfsPath
                + " freed up size: " + c.entrySize);
        }
    }
}
Evicts an entry from the cache . This calls back into the application to indicate that a cache entry has been reclaimed .
31,883
/**
 * Map the HDFS path to its cached local path, refreshing the entry's LRU
 * stamp on a hit.
 *
 * @return the local path, or null if the path is not cached
 */
Path getCache(Path hdfsPath) {
    CacheEntry c = cacheMap.get(hdfsPath);
    localMetrics.numGetAttempts++;
    if (c != null) {
        c.setGenstamp(globalStamp.incrementAndGet());
        localMetrics.numGetHits++;
        return c.localPath;
    }
    return null;
}
Maps the hdfs pathname to a local pathname . Returns null if this is not found in the cache .
31,884
/**
 * Run an eviction pass if the cache has exceeded its capacity, reducing it
 * by the configured percentage. Entries are evicted in LRU order until the
 * target size is reached.
 *
 * Synchronized, and additionally guarded by evictionInProgress so that
 * re-entrant calls (evictCache callbacks may call back in) do not start a
 * second pass.
 *
 * @throws IOException if evicting an entry fails
 */
synchronized void checkEvict() throws IOException {
    if (cacheSize.get() < cacheSizeMax) {
        return;
    }
    if (evictionInProgress) {
        // A pass is already running (possible via re-entrancy).
        return;
    }
    evictionInProgress = true;
    try {
        long curSize = cacheSize.get();
        // Target: capacity minus the configured eviction percentage.
        long targetSize = cacheSizeMax - (cacheSizeMax * cacheEvictPercent) / 100;
        if (LOG.isDebugEnabled()) {
            // Fixed typo in the original log message ("cacpacity").
            LOG.debug("Cache size " + curSize + " has exceeded the "
                + " maximum configured capacity " + cacheSizeMax
                + ". Eviction has to reduce cache size to " + targetSize);
        }
        // Snapshot the entries and sort least-recently-used first.
        Collection<CacheEntry> values = cacheMap.values();
        CacheEntry[] records = values.toArray(new CacheEntry[values.size()]);
        Arrays.sort(records, LRU_COMPARATOR);
        for (int i = 0; i < records.length; i++) {
            if (cacheSize.get() <= targetSize) {
                break;
            }
            CacheEntry c = records[i];
            evictCache(c.hdfsPath);
        }
    } finally {
        evictionInProgress = false;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Cache eviction complete. Current cache size is "
            + cacheSize.get());
    }
}
Eviction occurs when the cache is full ; a specified percentage of the cache is freed on every run . This method is synchronized so that only one thread performs the eviction .
31,885
/**
 * Get a shared SecureRandom instance for creating storage IDs. Falls back
 * to the plain Random {@code R} if SHA1PRNG is unavailable.
 *
 * Synchronized so the cache is initialized at most once.
 */
public synchronized static Random getSecureRandom() {
    if (cachedSecureRandom != null)
        return cachedSecureRandom;
    try {
        // BUG FIX: the original created a new instance on every call and
        // never assigned cachedSecureRandom, so the cache check above could
        // never succeed. SecureRandom is thread-safe, so sharing is fine.
        cachedSecureRandom = SecureRandom.getInstance("SHA1PRNG");
        return cachedSecureRandom;
    } catch (NoSuchAlgorithmException e) {
        return R;
    }
}
get an instance of a SecureRandom for creating storageid
31,886
/**
 * Initialize datanode-global settings: storage, config, MXBean, data
 * transceiver, info server, IPC server, metrics and the thread-liveness
 * reporter.
 *
 * @throws IOException if any server or storage initialization fails
 */
protected void initGlobalSetting(Configuration conf, AbstractList<File> dataDirs)
        throws IOException {
    this.dataDirs = dataDirs;
    this.conf = conf;
    storage = new DataStorage(this);
    initConfig(conf);
    registerMXBean();
    initDataXceiver(conf);
    startInfoServer(conf);
    initIpcServer(conf);
    // Metrics need the storage ID, so storage must be created first.
    myMetrics = new DataNodeMetrics(conf, storage.getStorageID());
    setCountingLoggers(myMetrics);
    // Liveness threshold defaults to 240s.
    threadLivenessReporter = new DatanodeThreadLivenessReporter(
        conf.getLong("dfs.datanode.thread.liveness.threshold", 240 * 1000),
        myMetrics.threadActiveness);
}
Initialize global settings for DN
31,887
/**
 * Initialize the dataset and both scanners. The dataset must exist before
 * the block and directory scanners are attached to it.
 */
protected void initDataSetAndScanner(Configuration conf,
        AbstractList<File> dataDirs, int numOfNameSpaces) throws IOException {
    initFsDataSet(conf, dataDirs, numOfNameSpaces);
    initDataBlockScanner(conf);
    initDirectoryScanner(conf);
}
Initialize dataset and block scanner
31,888
/**
 * Start the datanode with the given configuration: global settings, the
 * namespace manager for all configured namenodes, then dataset and scanners.
 *
 * @throws IOException if initialization fails
 */
void startDataNode(Configuration conf, AbstractList<File> dataDirs)
        throws IOException {
    initGlobalSetting(conf, dataDirs);
    List<InetSocketAddress> nameNodeAddrs = DFSUtil.getNNServiceRpcAddresses(conf);
    // The first configured namenode is kept as the (legacy) default address.
    DataNode.nameNodeAddr = nameNodeAddrs.get(0);
    namespaceManager = new NamespaceManager(conf, nameNodeAddrs);
    initDataSetAndScanner(conf, dataDirs, nameNodeAddrs.size());
}
This method starts the data node with the specified conf .
31,889
/**
 * Address the namenode uses to communicate with datanodes. Prefers the
 * dedicated datanode-protocol address when configured; otherwise falls back
 * to the client-protocol (default) address — which, if the namenode runs
 * multiple RPC servers, must not be used by clients.
 */
public static InetSocketAddress getNameNodeAddress(Configuration conf) {
    InetSocketAddress dnAddr = NameNode.getDNProtocolAddress(conf);
    return dnAddr != null ? dnAddr : NameNode.getClientProtocolAddress(conf);
}
This method returns the address namenode uses to communicate with datanodes . If this address is not configured the default NameNode address is used as it is running only one RPC server . If it is running multiple servers this address cannot be used by clients!!
31,890
/**
 * Get the namenode proxy corresponding to a namespace.
 *
 * @param namespaceId id of the namespace
 * @return the datanode-protocol proxy for that namespace's namenode
 * @throws IOException if no initialized proxy exists for the id
 */
public DatanodeProtocol getNSNamenode(int namespaceId) throws IOException {
    NamespaceService nsos = namespaceManager.get(namespaceId);
    if (nsos == null || nsos.getDatanodeProtocol() == null) {
        // Fixed typo in the original message ("namnode").
        throw new IOException("cannot find a namenode proxy for namespaceId="
            + namespaceId);
    }
    return nsos.getDatanodeProtocol();
}
Get namenode corresponding to a namespace
31,891
/**
 * Get this datanode's registration for the given namespace id.
 *
 * @throws IOException if no initialized NSOfferService exists for the id
 */
public DatanodeRegistration getDNRegistrationForNS(int namespaceId)
        throws IOException {
    NamespaceService nsos = namespaceManager.get(namespaceId);
    if (nsos == null || nsos.getNsRegistration() == null) {
        throw new IOException("cannot find NSOfferService for namespaceId="
            + namespaceId);
    }
    return nsos.getNsRegistration();
}
get datanode registration by namespace id
31,892
/**
 * Shut down this datanode instance; returns only after shutdown is
 * complete. Must be called by the offerService thread only, otherwise
 * deadlock may occur. Re-entrant calls are detected and ignored.
 */
public void shutdown() {
    // Guard against concurrent/repeated shutdown.
    if (this.shuttingDown.getAndSet(true)) {
        LOG.warn("DataNode.shutdown() was called while shutting down.");
        return;
    }
    // Stop the HTTP info server first.
    if (infoServer != null) {
        try {
            infoServer.stop();
        } catch (Exception e) {
            LOG.warn("Exception shutting down DataNode", e);
        }
    }
    if (ipcServer != null) {
        ipcServer.stop();
    }
    this.shouldRun = false;
    // Kill the data transceiver and wait (up to ~10 minutes) for all
    // transceiver threads in the group to exit.
    if (dataXceiverServer != null) {
        ((DataXceiverServer) this.dataXceiverServer.getRunnable()).kill();
        this.dataXceiverServer.interrupt();
        if (this.threadGroup != null) {
            int retries = 0;
            while (true) {
                this.threadGroup.interrupt();
                LOG.info("Waiting for threadgroup to exit, active threads is "
                    + this.threadGroup.activeCount());
                if (this.threadGroup.activeCount() == 0) {
                    break;
                }
                try {
                    if (++retries > 600) {
                        // Give up after 10 minutes and dump remaining threads.
                        Thread[] activeThreads = new Thread[this.threadGroup.activeCount()];
                        this.threadGroup.enumerate(activeThreads, true);
                        LOG.info("Active Threads: " + Arrays.toString(activeThreads));
                        LOG.warn("Waited for ThreadGroup to be empty for 10 minutes."
                            + " SHUTTING DOWN NOW");
                        break;
                    }
                    Thread.sleep(1000);
                } catch (InterruptedException e) {
                }
            }
        }
        try {
            this.dataXceiverServer.join();
        } catch (InterruptedException ie) {
        }
    }
    if (blockCopyExecutor != null && !blockCopyExecutor.isShutdown()) {
        blockCopyExecutor.shutdownNow();
    }
    if (namespaceManager != null) {
        namespaceManager.shutDownAll();
    }
    if (blockScanner != null) {
        blockScanner.shutdown();
    }
    if (directoryScanner != null) {
        shutdownDirectoryScanner();
    }
    // Release storage locks before shutting down the dataset itself.
    if (storage != null) {
        try {
            this.storage.unlockAll();
        } catch (IOException ie) {
        }
    }
    if (data != null) {
        data.shutdown();
    }
    if (myMetrics != null) {
        setCountingLoggers(null);
        myMetrics.shutdown();
    }
    this.shutdownMXBean();
}
Shut down this instance of the datanode . Returns only after shutdown is complete . This method can only be called by the offerService thread . Otherwise deadlock might occur .
31,893
/**
 * Inspect an exception from a disk operation. Interruption-related
 * exceptions are ignored; "No space left on device" is rethrown as
 * DiskOutOfSpaceException; anything else triggers a full disk check.
 *
 * @throws IOException DiskOutOfSpaceException on ENOSPC, or whatever
 *         checkDiskError() throws
 */
protected void checkDiskError(Exception e) throws IOException {
    if (e instanceof ClosedByInterruptException
        || e instanceof java.io.InterruptedIOException) {
        // Interruptions are not disk failures.
        return;
    }
    LOG.warn("checkDiskError: exception: ", e);
    if (e.getMessage() != null
        && e.getMessage().startsWith("No space left on device")) {
        throw new DiskOutOfSpaceException("No space left on device");
    } else {
        checkDiskError();
    }
}
Check whether the given exception indicates that the disk is out of space ; otherwise trigger a general disk check .
31,894
/**
 * Check the data directories for disk failures and handle any error found.
 * At most one check runs at a time, and checks are rate-limited to one per
 * minDiskCheckIntervalMsec.
 *
 * @throws IOException if handling a disk error fails
 */
protected void checkDiskError() throws IOException {
    // CAS guard: only one thread performs the check at a time.
    boolean setSuccess = checkingDisk.compareAndSet(false, true);
    if (!setSuccess) {
        LOG.info("checkDiskError is already running.");
        return;
    }
    try {
        long curTime = System.currentTimeMillis();
        // Rate limit: skip if a check completed too recently.
        if (curTime - timeLastCheckDisk < minDiskCheckIntervalMsec) {
            // NOTE(review): "mses" in this log message is a typo for "msec".
            LOG.info("checkDiskError finished within "
                + minDiskCheckIntervalMsec + " mses. Skip this one.");
            return;
        }
        data.checkDataDir();
        timeLastCheckDisk = System.currentTimeMillis();
    } catch (DiskErrorException de) {
        handleDiskError(de.getMessage());
    } finally {
        checkingDisk.set(false);
    }
}
Check if there is a disk failure and if so handle the error
31,895
/**
 * Return the list of data directories from the file named by
 * "dfs.datadir.confpath". If that key is unset, or the file is empty or
 * unreadable, fall back to the directories configured in "dfs.data.dir".
 *
 * @param conf configuration to read from
 * @return data directory paths, or whatever "dfs.data.dir" yields
 */
public static String[] getListOfDataDirs(Configuration conf) {
    String[] configFilePath = conf.getStrings("dfs.datadir.confpath");
    String[] dataDirs = null;
    if (configFilePath != null && (configFilePath.length != 0)) {
        try {
            DataDirFileReader reader = new DataDirFileReader(configFilePath[0]);
            dataDirs = reader.getArrayOfCurrentDataDirectories();
            if (dataDirs == null) {
                LOG.warn("File is empty, using dfs.data.dir directories");
            }
        } catch (Exception e) {
            LOG.warn("Could not read file, using directories from dfs.data.dir"
                + " Exception: ", e);
        }
    } else {
        // Fixed garbled double negative in the original message
        // ("No dfs.datadir.confpath not defined, now using default directories").
        LOG.warn("dfs.datadir.confpath not defined, using default "
            + "directories from dfs.data.dir");
    }
    if (dataDirs == null) {
        dataDirs = conf.getStrings("dfs.data.dir");
    }
    return dataDirs;
}
Returns a list of data directories from the file provided by the dfs . datadir . confpath . If it cannot get the list of data directories then the method will return the default dataDirs from dfs . data . dir .
31,896
/**
 * Instantiate and start a single datanode daemon.
 *
 * @return the running DataNode, or null if instantiation produced none
 */
public static DataNode createDataNode(String args[], Configuration conf)
        throws IOException {
    DataNode dn = instantiateDataNode(args, conf);
    if (dn == null) {
        return null;
    }
    dn.runDatanodeDaemon();
    return dn;
}
Instantiate and start a single datanode daemon and wait for it to finish . If this thread is specifically interrupted it will stop waiting .
31,897
/**
 * JSON representation of a map from namenode host address to namespace id,
 * for all initialized namespaces.
 */
public String getNamenodeAddresses() {
    final Map<String, Integer> info = new HashMap<String, Integer>();
    for (NamespaceService ns : namespaceManager.getAllNamenodeThreads()) {
        // Skip namespaces still initializing.
        if (ns != null && ns.initialized()) {
            info.put(ns.getNNSocketAddress().getAddress().getHostAddress(),
                ns.getNamespaceId());
        }
    }
    return JSON.toString(info);
}
Returned information is a JSON representation of a map with name node host name as the key and block pool Id as the value
31,898
/**
 * JSON representation of a map from volume directory name to a map of
 * volume attributes (usedSpace, freeSpace, reservedSpace).
 *
 * @return the JSON string, or "ERROR" if volume info could not be read
 */
public String getVolumeInfo() {
    final Map<String, Object> info = new HashMap<String, Object>();
    try {
        FSVolume[] volumes = ((FSDataset) this.data).volumes.getVolumes();
        for (FSVolume v : volumes) {
            final Map<String, Object> innerInfo = new HashMap<String, Object>();
            innerInfo.put("usedSpace", v.getDfsUsed());
            innerInfo.put("freeSpace", v.getAvailable());
            innerInfo.put("reservedSpace", v.getReserved());
            info.put(v.getDir().toString(), innerInfo);
        }
        return JSON.toString(info);
    } catch (IOException e) {
        LOG.info("Cannot get volume info.", e);
        return "ERROR";
    }
}
Returned information is a JSON representation of a map with volume name as the key and value is a map of volume attribute keys to its values
31,899
/**
 * Send a blocks-being-written report for the given namespace to the given
 * namenode. No report is sent when there are no such blocks.
 *
 * @param node           namenode to report to
 * @param namespaceId    namespace whose in-progress blocks are reported
 * @param nsRegistration this datanode's registration for that namespace
 * @throws IOException if the RPC fails
 */
public void sendBlocksBeingWrittenReport(DatanodeProtocol node,
        int namespaceId, DatanodeRegistration nsRegistration) throws IOException {
    Block[] blocks = data.getBlocksBeingWrittenReport(namespaceId);
    if (blocks != null && blocks.length != 0) {
        long[] blocksAsLong = BlockListAsLongs.convertToArrayLongs(blocks);
        BlockReport bbwReport = new BlockReport(blocksAsLong);
        node.blocksBeingWrittenReport(nsRegistration, bbwReport);
    }
}
Sends a Blocks Being Written report to the given node .