idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,200
/**
 * Writes every counter value to the given log at INFO level: a total count,
 * then one line per group and one indented line per counter within it.
 */
public void log(Log log) {
  log.info("Counters: " + size());
  for (Group g : this) {
    log.info(" " + g.getDisplayName());
    for (Counter c : g) {
      log.info(" " + c.getDisplayName() + "=" + c.getCounter());
    }
  }
}
Logs the current counter values .
33,201
/**
 * Serializes all counters to one easily-parsed line of comma-separated
 * {@code group.counter:value} entries.
 *
 * @return the compact single-line representation
 */
public synchronized String makeCompactString() {
  // StringBuilder: the buffer is method-local and the method is already
  // synchronized, so StringBuffer's per-call locking is pure overhead.
  StringBuilder buffer = new StringBuilder();
  boolean first = true;
  for (Group group : this) {
    for (Counter counter : group) {
      if (first) {
        first = false;
      } else {
        buffer.append(',');
      }
      buffer.append(group.getDisplayName());
      buffer.append('.');
      buffer.append(counter.getDisplayName());
      buffer.append(':');
      buffer.append(counter.getCounter());
    }
  }
  return buffer.toString();
}
Convert a counters object into a single line that is easy to parse .
33,202
/**
 * Builds a JSON object mapping each group display name to a map of
 * counter display name to counter value, and serializes it.
 */
public synchronized String makeJsonString() {
  Map<String, Map<String, Long>> data =
      new HashMap<String, Map<String, Long>>();
  for (Group g : this) {
    Map<String, Long> perCounter = new HashMap<String, Long>();
    for (Counter c : g) {
      perCounter.put(c.getDisplayName(), c.getCounter());
    }
    data.put(g.getDisplayName(), perCounter);
  }
  return JSON.toString(data);
}
Convert a counters object into a JSON string
33,203
/**
 * Concatenates every group's escaped compact representation, producing a
 * textual form that can be converted back into a Counters object.
 */
public synchronized String makeEscapedCompactString() {
  // Local StringBuilder needs no synchronization, unlike StringBuffer.
  StringBuilder buffer = new StringBuilder();
  for (Group group : this) {
    buffer.append(group.makeEscapedCompactString());
  }
  return buffer.toString();
}
Represent the counter in a textual format that can be converted back to its object form
33,204
/**
 * Extracts the text between the next escape-aware occurrence of {@code open}
 * and the following {@code close} in {@code str}, scanning from
 * {@code index}. On success {@code index} is advanced past the closing
 * delimiter and the enclosed text is returned; if no opening delimiter
 * remains, returns null.
 *
 * @param index in/out cursor into {@code str}; updated only on success
 * @throws ParseException if an opening delimiter is found but never closed
 */
// split is reused: cleared after locating the opener so it only holds the
// text accumulated while searching for the closer.
private static String getBlock ( String str , char open , char close , IntWritable index ) throws ParseException { StringBuilder split = new StringBuilder ( ) ; int next = StringUtils . findNext ( str , open , StringUtils . ESCAPE_CHAR , index . get ( ) , split ) ; split . setLength ( 0 ) ; if ( next >= 0 ) { ++ next ; next = StringUtils . findNext ( str , close , StringUtils . ESCAPE_CHAR , next , split ) ; if ( next >= 0 ) { ++ next ; index . set ( next ) ; return split . toString ( ) ; } else { throw new ParseException ( "Unexpected end of block" , next ) ; } } return null ; }
Returns the next block of text between the given open and close delimiters, or returns null if no opening delimiter remains .
33,205
/**
 * Builds a unique per-task filename of the form
 * {@code name-[mr]-<partition><extension>}, where the letter encodes map
 * vs reduce and the partition is the formatted task id.
 */
public synchronized static String getUniqueFile(TaskAttemptContext context,
    String name, String extension) {
  TaskID taskId = context.getTaskAttemptID().getTaskID();
  char taskType = taskId.isMap() ? 'm' : 'r';
  return name + '-' + taskType + '-'
      + NUMBER_FORMAT.format(taskId.getId()) + extension;
}
Generate a unique filename based on the task id name and extension
33,206
/**
 * Returns the default output file for this task: the committer's work
 * directory plus a unique "part" filename with the given extension.
 */
public Path getDefaultWorkFile(TaskAttemptContext context, String extension)
    throws IOException {
  FileOutputCommitter committer =
      (FileOutputCommitter) getOutputCommitter(context);
  Path workDir = committer.getWorkPath();
  return new Path(workDir, getUniqueFile(context, "part", extension));
}
Get the default path and filename for the output format .
33,207
/**
 * Combines all values for a key: the aggregator type is parsed from the key
 * prefix (before TYPE_SEPARATOR), every value is folded into a freshly
 * generated aggregator, and each combiner output is emitted under the
 * original key (converted to Text if necessary).
 *
 * @throws IOException if emitting to the collector fails
 */
// NOTE(review): assumes every key contains TYPE_SEPARATOR; a key without it
// makes indexOf return -1 and substring throw — confirm upstream guarantees.
public void reduce ( Text key , Iterator < Text > values , OutputCollector < Text , Text > output , Reporter reporter ) throws IOException { String keyStr = key . toString ( ) ; int pos = keyStr . indexOf ( ValueAggregatorDescriptor . TYPE_SEPARATOR ) ; String type = keyStr . substring ( 0 , pos ) ; ValueAggregator aggregator = ValueAggregatorBaseDescriptor . generateValueAggregator ( type ) ; while ( values . hasNext ( ) ) { aggregator . addNextValue ( values . next ( ) ) ; } Iterator outputs = aggregator . getCombinerOutput ( ) . iterator ( ) ; while ( outputs . hasNext ( ) ) { Object v = outputs . next ( ) ; if ( v instanceof Text ) { output . collect ( key , ( Text ) v ) ; } else { output . collect ( key , new Text ( v . toString ( ) ) ) ; } } }
Combines values for a given key .
33,208
/**
 * Sums the memory already committed to tasks of the given type on the
 * tracker: for every RUNNING, UNASSIGNED or cleanup-phase task of the
 * matching type, per-slot memory (from the scheduler) times its slot count.
 *
 * @return total reserved memory; tasks of the other type contribute 0
 */
synchronized Long getMemReservedForTasks ( TaskTrackerStatus taskTracker , TaskType taskType ) { long vmem = 0 ; for ( TaskStatus task : taskTracker . getTaskReports ( ) ) { if ( ( task . getRunState ( ) == TaskStatus . State . RUNNING ) || ( task . getRunState ( ) == TaskStatus . State . UNASSIGNED ) || ( task . inTaskCleanupPhase ( ) ) ) { long myVmem = 0 ; if ( task . getIsMap ( ) && taskType == TaskType . MAP ) { long memSizePerMapSlot = scheduler . getMemSizeForMapSlot ( ) ; myVmem = memSizePerMapSlot * task . getNumSlots ( ) ; } else if ( ! task . getIsMap ( ) && taskType == TaskType . REDUCE ) { long memSizePerReduceSlot = scheduler . getMemSizeForReduceSlot ( ) ; myVmem = memSizePerReduceSlot * task . getNumSlots ( ) ; } vmem += myVmem ; } } return Long . valueOf ( vmem ) ; }
Find the memory that is already used by all the running tasks residing on the given TaskTracker .
33,209
/**
 * Decides whether the tracker has enough free memory for one task of the
 * given type from the job. Trivially true when memory-based scheduling is
 * disabled; otherwise free memory is total usable (per-slot size times slot
 * count) minus what running tasks already reserve, and the job's per-task
 * requirement must fit.
 *
 * @return true if the task can be scheduled on this tracker
 */
boolean matchesMemoryRequirements ( JobInProgress job , TaskType taskType , TaskTrackerStatus taskTracker ) { LOG . debug ( "Matching memory requirements of " + job . getJobID ( ) . toString ( ) + " for scheduling on " + taskTracker . trackerName ) ; if ( ! isSchedulingBasedOnMemEnabled ( ) ) { LOG . debug ( "Scheduling based on job's memory requirements is disabled." + " Ignoring any value set by job." ) ; return true ; } Long memUsedOnTT = getMemReservedForTasks ( taskTracker , taskType ) ; long totalMemUsableOnTT = 0 ; long memForThisTask = 0 ; if ( taskType == TaskType . MAP ) { memForThisTask = job . getJobConf ( ) . getMemoryForMapTask ( ) ; totalMemUsableOnTT = scheduler . getMemSizeForMapSlot ( ) * taskTracker . getMaxMapSlots ( ) ; } else if ( taskType == TaskType . REDUCE ) { memForThisTask = job . getJobConf ( ) . getMemoryForReduceTask ( ) ; totalMemUsableOnTT = scheduler . getMemSizeForReduceSlot ( ) * taskTracker . getMaxReduceSlots ( ) ; } long freeMemOnTT = totalMemUsableOnTT - memUsedOnTT . longValue ( ) ; if ( memForThisTask > freeMemOnTT ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "memForThisTask (" + memForThisTask + ") > freeMemOnTT (" + freeMemOnTT + "). A " + taskType + " task from " + job . getJobID ( ) . toString ( ) + " cannot be scheduled on TT " + taskTracker . trackerName ) ; } return false ; } if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "memForThisTask = " + memForThisTask + ". freeMemOnTT = " + freeMemOnTT + ". A " + taskType . toString ( ) + " task from " + job . getJobID ( ) . toString ( ) + " matches memory requirements " + "on TT " + taskTracker . trackerName ) ; } return true ; }
Check if a TT has enough memory to run a task of the specified type from this job .
33,210
/**
 * Resolves a path for reading from the local FS, searching all configured
 * directories for an existing file and returning its complete path.
 */
public Path getLocalPathToRead(String pathStr, Configuration conf)
    throws IOException {
  return obtainContext(contextCfgItemName).getLocalPathToRead(pathStr, conf);
}
Get a path from the local FS for reading . We search through all the configured dirs for the file's existence and return the complete path to the file when we find one .
33,211
/**
 * Returns true if the file exists in any of the configured local
 * directories for this context.
 */
public boolean ifExists(String pathStr, Configuration conf) {
  return obtainContext(contextCfgItemName).ifExists(pathStr, conf);
}
We search through all the configured dirs for the file's existence and return true when we find one .
33,212
/**
 * Polls Zookeeper until the primary's last-txid znode exists and its
 * session id matches the ssid znode, sleeping {@code retrySleep} ms between
 * attempts.
 *
 * @throws IOException if no valid txid znode appears within the configured
 *         maximum wait time
 */
// The broad catch of Throwable treats any ZK read failure as "not ready
// yet" and retries; only the overall deadline aborts the loop.
private void waitForLastTxIdNode ( AvatarZooKeeperClient zk , Configuration conf ) throws Exception { String address = conf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; long maxWaitTime = this . getMaxWaitTimeForWaitTxid ( ) ; long start = System . currentTimeMillis ( ) ; while ( true ) { if ( System . currentTimeMillis ( ) - start > maxWaitTime ) { throw new IOException ( "No valid last txid znode found" ) ; } try { long sessionId = zk . getPrimarySsId ( address , false ) ; ZookeeperTxId zkTxId = zk . getPrimaryLastTxId ( address , false ) ; if ( sessionId != zkTxId . getSessionId ( ) ) { LOG . warn ( "Session Id in the ssid node : " + sessionId + " does not match the session Id in the txid node : " + zkTxId . getSessionId ( ) + " retrying..." ) ; Thread . sleep ( retrySleep ) ; continue ; } } catch ( Throwable e ) { LOG . warn ( "Caught exception : " + e + " retrying ..." ) ; Thread . sleep ( retrySleep ) ; continue ; } break ; } }
Waits till the last txid node appears in Zookeeper such that it matches the ssid node .
33,213
/**
 * Switches this node's avatar to the requested role. Only the
 * Standby-to-Active transition is supported: requesting STANDBY or an
 * unknown role throws. If the node is already ACTIVE this is a no-op;
 * otherwise the node is quiesced, failover is performed and Zookeeper is
 * updated.
 *
 * @return 0 on success (also when already in the target avatar)
 * @throws IOException for an unsupported or unknown role
 */
public int setAvatar ( String role , boolean noverification , String serviceName , String instance ) throws IOException { Avatar dest ; if ( Avatar . ACTIVE . toString ( ) . equalsIgnoreCase ( role ) ) { dest = Avatar . ACTIVE ; } else if ( Avatar . STANDBY . toString ( ) . equalsIgnoreCase ( role ) ) { throw new IOException ( "setAvatar Command only works to switch avatar" + " from Standby to Primary" ) ; } else { throw new IOException ( "Unknown avatar type " + role ) ; } Avatar current = avatarnode . getAvatar ( ) ; if ( current == dest ) { System . out . println ( "This instance is already in " + current + " avatar." ) ; } else { try { avatarnode . quiesceForFailover ( noverification ) ; } catch ( RemoteException re ) { handleRemoteException ( re ) ; } avatarnode . performFailover ( ) ; updateZooKeeper ( serviceName , instance ) ; } return 0 ; }
Sets the avatar to the specified value
33,214
/**
 * Returns true when federation is not configured, or when "-service"
 * appears among the arguments; otherwise prints an error for the command
 * and returns false.
 */
public static boolean isServiceSpecified(String command, Configuration conf,
    String[] argv) {
  if (conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES) == null) {
    return true;
  }
  for (String arg : argv) {
    if (arg.equals("-service")) {
      return true;
    }
  }
  printServiceErrorMessage(command, conf);
  return false;
}
Checks if the service argument is specified in the command arguments .
33,215
/**
 * Computes the processing rate (units of {@code cumulative} per
 * millisecond) since the map phase started.
 *
 * @param cumulative  total amount processed so far
 * @param currentTime current wall-clock time in milliseconds
 * @return the rate, or 0 (with an error logged) if no time has elapsed
 */
private double calculateRate(long cumulative, long currentTime) {
  assert getPhase() == Phase.MAP : "MapTaskStatus not in map phase!";
  long startTime = getStartTime();
  long timeSinceMapStart = currentTime - startTime;
  if (timeSinceMapStart <= 0) {
    LOG.error("Current time is " + currentTime
        + " but start time is " + startTime);
    return 0;
  }
  // Cast before dividing: the original long/long division truncated the
  // fraction, so any rate below 1 unit/ms was silently reported as 0.
  return (double) cumulative / timeSinceMapStart;
}
Helper function that calculate the rate given the total so far and the current time
33,216
/**
 * Returns true if the node's free memory meets the configured
 * reserved-memory floor; otherwise logs the shortfall at debug level and
 * returns false.
 */
public boolean hasEnoughMemory(ClusterNode node) {
  int totalMB = node.getTotal().memoryMB;
  int freeMB = node.getFree().memoryMB;
  if (freeMB >= nodeReservedMemoryMB) {
    return true;
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug(node.getHost() + " not enough memory."
        + " totalMB:" + totalMB
        + " free:" + freeMB
        + " limit:" + nodeReservedMemoryMB);
  }
  return false;
}
Check if the node has enough memory to run tasks
33,217
/**
 * Returns true if the node's free disk space (in GB) meets the configured
 * reserved-disk floor; otherwise logs the shortfall at debug level and
 * returns false.
 */
private boolean hasEnoughDiskSpace(ClusterNode node) {
  int total = node.getTotal().diskGB;
  int free = node.getFree().diskGB;
  if (free < nodeReservedDiskGB) {
    if (LOG.isDebugEnabled()) {
      // Log labels fixed: these values are gigabytes of disk, not "MB";
      // the old message was copy-pasted from hasEnoughMemory.
      LOG.debug(node.getHost() + " not enough disk space."
          + " totalGB:" + total
          + " freeGB:" + free
          + " limit:" + nodeReservedDiskGB);
    }
    return false;
  }
  return true;
}
Check if the node has enough disk space to run tasks
33,218
/**
 * Clears one bit belonging to the given member key, choosing the bit via
 * the requested scheme (RANDOM, MINIMUM_FN, MAXIMUM_FP or RATIO).
 *
 * @throws NullPointerException     if the key is null
 * @throws IllegalArgumentException if the key is not a member of the filter
 * @throws AssertionError           for an unknown scheme value
 */
public void selectiveClearing ( Key k , short scheme ) { if ( k == null ) { throw new NullPointerException ( "Key can not be null" ) ; } if ( ! membershipTest ( k ) ) { throw new IllegalArgumentException ( "Key is not a member" ) ; } int index = 0 ; int [ ] h = hash . hash ( k ) ; switch ( scheme ) { case RANDOM : index = randomRemove ( ) ; break ; case MINIMUM_FN : index = minimumFnRemove ( h ) ; break ; case MAXIMUM_FP : index = maximumFpRemove ( h ) ; break ; case RATIO : index = ratioRemove ( h ) ; break ; default : throw new AssertionError ( "Undefined selective clearing scheme" ) ; } clearBit ( index ) ; }
Performs the selective clearing for a given key .
33,219
/**
 * Among the key's hashed bit positions, picks the one whose key list
 * carries the least weight, i.e. whose clearing generates the fewest false
 * negatives.
 *
 * @param h the hash values of the key
 * @return the selected bit index
 */
// minIndex stays Integer.MAX_VALUE only if every weight equals
// Double.MAX_VALUE; in practice the first position always wins initially.
private int minimumFnRemove ( int [ ] h ) { int minIndex = Integer . MAX_VALUE ; double minValue = Double . MAX_VALUE ; for ( int i = 0 ; i < nbHash ; i ++ ) { double keyWeight = getWeight ( keyVector [ h [ i ] ] ) ; if ( keyWeight < minValue ) { minIndex = h [ i ] ; minValue = keyWeight ; } } return minIndex ; }
Chooses the bit position that minimizes the number of false negative generated .
33,220
/**
 * Among the key's hashed bit positions, picks the one whose
 * false-positive list carries the greatest weight, i.e. whose clearing
 * removes the most false positives.
 *
 * @param h the hash values of the key
 * @return the selected bit index
 */
private int maximumFpRemove(int[] h) {
  int maxIndex = -1;
  // NEGATIVE_INFINITY, not Double.MIN_VALUE: MIN_VALUE is the smallest
  // *positive* double, so the old code selected no index when every weight
  // was <= 0 and returned Integer.MIN_VALUE, crashing clearBit() later.
  double maxValue = Double.NEGATIVE_INFINITY;
  for (int i = 0; i < nbHash; i++) {
    double fpWeight = getWeight(fpVector[h[i]]);
    if (fpWeight > maxValue) {
      maxValue = fpWeight;
      maxIndex = h[i];
    }
  }
  return maxIndex;
}
Chooses the bit position that maximizes the number of false positive removed .
33,221
/**
 * Recomputes the per-bit ratio and, among the key's hashed positions,
 * picks the one with the smallest ratio — balancing fewest false negatives
 * generated against most false positives removed.
 *
 * @param h the hash values of the key
 * @return the selected bit index
 */
private int ratioRemove ( int [ ] h ) { computeRatio ( ) ; int minIndex = Integer . MAX_VALUE ; double minValue = Double . MAX_VALUE ; for ( int i = 0 ; i < nbHash ; i ++ ) { if ( ratio [ h [ i ] ] < minValue ) { minValue = ratio [ h [ i ] ] ; minIndex = h [ i ] ; } } return minIndex ; }
Chooses the bit position that minimizes the number of false negatives generated while maximizing the number of false positives removed .
33,222
/**
 * Clears bit {@code index} in the bit vector and removes every key and
 * false positive recorded at that position from all vector lists, then
 * resets the position's ratio.
 *
 * @throws ArrayIndexOutOfBoundsException if index is outside the vector
 */
// Note: kl and keyVector[index] (and fpl / fpVector[index]) alias the same
// list, so the second clear() on each is redundant but harmless.
private void clearBit ( int index ) { if ( index < 0 || index >= vectorSize ) { throw new ArrayIndexOutOfBoundsException ( index ) ; } List < Key > kl = keyVector [ index ] ; List < Key > fpl = fpVector [ index ] ; int listSize = kl . size ( ) ; for ( int i = 0 ; i < listSize && ! kl . isEmpty ( ) ; i ++ ) { removeKey ( kl . get ( 0 ) , keyVector ) ; } kl . clear ( ) ; keyVector [ index ] . clear ( ) ; listSize = fpl . size ( ) ; for ( int i = 0 ; i < listSize && ! fpl . isEmpty ( ) ; i ++ ) { removeKey ( fpl . get ( 0 ) , fpVector ) ; } fpl . clear ( ) ; fpVector [ index ] . clear ( ) ; ratio [ index ] = 0.0 ; bits . clear ( index ) ; }
Clears a specified bit in the bit vector and keeps up - to - date the KeyList vectors .
33,223
/**
 * Allocates and initialises the per-bit bookkeeping: a synchronized key
 * list and false-positive list for every position, and a zeroed ratio
 * array.
 */
// @SuppressWarnings: generic array creation (List[vectorSize]) is
// unavoidable here; each slot is immediately filled with a List<Key>.
@ SuppressWarnings ( "unchecked" ) private void createVector ( ) { fpVector = new List [ vectorSize ] ; keyVector = new List [ vectorSize ] ; ratio = new double [ vectorSize ] ; for ( int i = 0 ; i < vectorSize ; i ++ ) { fpVector [ i ] = Collections . synchronizedList ( new ArrayList < Key > ( ) ) ; keyVector [ i ] = Collections . synchronizedList ( new ArrayList < Key > ( ) ) ; ratio [ i ] = 0.0 ; } }
Creates and initialises the various vectors .
33,224
/**
 * Records the epoch number to use for all future calls and propagates it
 * to every underlying logger.
 *
 * @throws IllegalStateException if an epoch was already established
 */
void setEpoch ( long e ) { Preconditions . checkState ( ! isEpochEstablished ( ) , "Epoch already established: epoch=%s" , myEpoch ) ; myEpoch = e ; for ( AsyncLogger l : loggers ) { l . setEpoch ( e ) ; } }
Set the epoch number used for all future calls .
33,225
/**
 * Propagates the highest successfully committed txid to every underlying
 * logger. Called after a successful quorum write; used for extra protocol
 * sanity checks (see HDFS-3863).
 */
public void setCommittedTxId(long txid, boolean force) {
  for (AsyncLogger l : loggers) {
    l.setCommittedTxId(txid, force);
  }
}
Set the highest successfully committed txid seen by the writer . This should be called after a successful write to a quorum and is used for extra sanity checks against the protocol . See HDFS - 3863 .
33,226
/**
 * Waits for a write quorum of loggers to respond to the given call,
 * failing fast once (loggers - majority + 1) errors make a quorum
 * impossible.
 *
 * @return the per-logger results of the successful responders
 * @throws IOException (QuorumException) if a quorum cannot be achieved
 */
< V > Map < AsyncLogger , V > waitForWriteQuorum ( QuorumCall < AsyncLogger , V > q , int timeoutMs , String operationName ) throws IOException { int majority = getMajoritySize ( ) ; int numLoggers = loggers . size ( ) ; checkMajoritySize ( majority , numLoggers ) ; return waitForQuorumInternal ( q , loggers . size ( ) , majority , numLoggers - majority + 1 , majority , timeoutMs , operationName ) ; }
Wait for a quorum of loggers to respond to the given call . If a quorum can't be achieved , throws a QuorumException .
33,227
/**
 * Appends an HTML table summarizing each underlying logger's status to the
 * given builder.
 */
void appendHtmlReport(StringBuilder sb) {
  sb.append("<table class=\"storage\">");
  sb.append("<thead><tr><td>JN</td><td>Status</td></tr></thead>\n");
  for (AsyncLogger l : loggers) {
    // Chained appends instead of building throwaway concatenated strings
    // that are then copied into the builder.
    sb.append("<tr><td>")
      .append(JspUtil.escapeXml(l.toString()))
      .append("</td><td>");
    l.appendHtmlReport(sb);
    sb.append("</td></tr>\n");
  }
  sb.append("</table>");
}
Append an HTML - formatted status readout on the current state of the underlying loggers .
33,228
/**
 * Streams {@code len} bytes from {@code buffer} starting at {@code off} to
 * the output, replacing each HTML-active byte (&amp; &lt; &gt; ' ") with
 * its pre-encoded entity bytes and copying everything else through
 * unchanged.
 *
 * @throws IOException if writing to the stream fails
 */
public static void quoteHtmlChars ( OutputStream output , byte [ ] buffer , int off , int len ) throws IOException { for ( int i = off ; i < off + len ; i ++ ) { switch ( buffer [ i ] ) { case '&' : output . write ( ampBytes ) ; break ; case '<' : output . write ( ltBytes ) ; break ; case '>' : output . write ( gtBytes ) ; break ; case '\'' : output . write ( aposBytes ) ; break ; case '"' : output . write ( quotBytes ) ; break ; default : output . write ( buffer , i , 1 ) ; } } }
Quote all of the active HTML characters in the given string as they are added to the buffer .
33,229
/**
 * Returns an HTML-safe copy of {@code item} with active characters
 * replaced by entities, the item itself when nothing needs quoting, or
 * null for null input.
 */
public static String quoteHtmlChars(String item) {
  if (item == null) {
    return null;
  }
  // NOTE(review): uses the platform default charset for the byte round
  // trip; assumes an ASCII-compatible encoding — confirm before changing.
  byte[] bytes = item.getBytes();
  if (!needsQuoting(bytes, 0, bytes.length)) {
    return item;
  }
  ByteArrayOutputStream buffer = new ByteArrayOutputStream();
  try {
    quoteHtmlChars(buffer, bytes, 0, bytes.length);
  } catch (IOException ioe) {
    // ByteArrayOutputStream never throws IOException; fail loudly instead
    // of silently returning truncated output if that ever changes.
    throw new AssertionError(ioe);
  }
  return buffer.toString();
}
Quote the given item to make it html - safe .
33,230
/**
 * Wraps the given stream so that every byte written through the wrapper is
 * HTML-quoted before reaching {@code out}. flush/close are forwarded.
 */
// The single-element `data` field is a scratch buffer for write(int);
// the wrapper is therefore not safe for concurrent single-byte writes.
public static OutputStream quoteOutputStream ( final OutputStream out ) throws IOException { return new OutputStream ( ) { private byte [ ] data = new byte [ 1 ] ; public void write ( byte [ ] data , int off , int len ) throws IOException { quoteHtmlChars ( out , data , off , len ) ; } public void write ( int b ) throws IOException { data [ 0 ] = ( byte ) b ; quoteHtmlChars ( out , data , 0 , 1 ) ; } public void flush ( ) throws IOException { out . flush ( ) ; } public void close ( ) throws IOException { out . close ( ) ; } } ; }
Return an output stream that quotes all of the output .
33,231
/**
 * Reverses HTML quoting: replaces &amp;amp; &amp;apos; &amp;gt; &amp;lt;
 * and &amp;quot; with their characters. Returns null for null input and
 * the original string when it contains no ampersand.
 *
 * @throws IllegalArgumentException on an unrecognized entity
 */
public static String unquoteHtmlChars(String item) {
  if (item == null) {
    return null;
  }
  int next = item.indexOf('&');
  if (next == -1) {
    return item;
  }
  // Table-driven decoding of the five entities the quoter emits.
  final String[] entities = { "&amp;", "&apos;", "&gt;", "&lt;", "&quot;" };
  final char[] decoded = { '&', '\'', '>', '<', '"' };
  int len = item.length();
  int posn = 0;
  StringBuilder out = new StringBuilder();
  while (next != -1) {
    out.append(item.substring(posn, next));
    boolean matched = false;
    for (int i = 0; i < entities.length; i++) {
      if (item.startsWith(entities[i], next)) {
        out.append(decoded[i]);
        next += entities[i].length();
        matched = true;
        break;
      }
    }
    if (!matched) {
      int end = item.indexOf(';', next) + 1;
      if (end == 0) {
        end = len;
      }
      throw new IllegalArgumentException("Bad HTML quoting for "
          + item.substring(next, end));
    }
    posn = next;
    next = item.indexOf('&', posn);
  }
  out.append(item.substring(posn, len));
  return out.toString();
}
Remove HTML quoting from a string .
33,232
/**
 * Returns the index record for the given map and reduce, reading the map's
 * index file into the cache if absent and blocking while another thread is
 * still constructing the cached entry.
 *
 * @throws IOException if interrupted while waiting, or if {@code reduce}
 *         is out of range for the spill record
 */
public IndexRecord getIndexInformation(String mapId, int reduce,
    Path fileName) throws IOException {
  IndexInformation info = cache.get(mapId);
  if (info == null) {
    info = readIndexFileToCache(fileName, mapId);
  } else {
    synchronized (info) {
      while (null == info.mapSpillRecord) {
        try {
          info.wait();
        } catch (InterruptedException e) {
          throw new IOException("Interrupted waiting for construction", e);
        }
      }
    }
    LOG.debug("IndexCache HIT: MapId " + mapId + " found");
  }
  // <= (not <): valid indices are 0..size()-1, so reduce == size() must
  // also be rejected or getIndex(reduce) would read one past the end.
  if (info.mapSpillRecord.size() == 0
      || info.mapSpillRecord.size() <= reduce) {
    throw new IOException("Invalid request "
        + " Map Id = " + mapId
        + " Reducer = " + reduce
        + " Index Info Length = " + info.mapSpillRecord.size());
  }
  return info.mapSpillRecord.getIndex(reduce);
}
This method gets the index information for the given mapId and reduce . It reads the index file into cache if it is not already present .
33,233
/**
 * Removes the map's index information from the cache and releases the
 * memory it accounted for. Should be called when a map output on this
 * tracker is discarded.
 */
public void removeMap(String mapId) {
  IndexInformation info = cache.remove(mapId);
  if (info != null) {
    totalMemoryUsed.addAndGet(-info.getSize());
    if (!queue.remove(mapId)) {
      // Added the missing space after "Map ID" so the id is readable,
      // matching the info-level message below.
      LOG.warn("Map ID " + mapId + " not found in queue!!");
    }
  } else {
    LOG.info("Map ID " + mapId + " not found in cache");
  }
}
This method removes the map from the cache . It should be called when a map output on this tracker is discarded .
33,234
/**
 * Evicts cached index entries in queue (FIFO) order until total memory use
 * drops to {@code totalMemoryAllowed} or below.
 */
// NOTE(review): queue.remove() throws NoSuchElementException if the queue
// empties while memory is still over budget — presumably the accounting
// keeps them in sync; confirm.
private synchronized void freeIndexInformation ( ) { while ( totalMemoryUsed . get ( ) > totalMemoryAllowed ) { String s = queue . remove ( ) ; IndexInformation info = cache . remove ( s ) ; if ( info != null ) { totalMemoryUsed . addAndGet ( - info . getSize ( ) ) ; } } }
Bring memory usage below totalMemoryAllowed .
33,235
/**
 * Collects the status of every task tracker running on the given host.
 */
private List<TaskTrackerStatus> getStatusesOnHost(String hostName) {
  List<TaskTrackerStatus> result = new ArrayList<TaskTrackerStatus>();
  synchronized (taskTrackers) {
    for (TaskTracker tracker : taskTrackers.values()) {
      TaskTrackerStatus s = tracker.getStatus();
      if (hostName.equals(s.getHost())) {
        result.add(s);
      }
    }
  }
  return result;
}
Get all task tracker statuses on given host
33,236
/**
 * Records a completed task attempt against its tracker for removal later.
 * Assumes the JobTracker lock is held on entry.
 */
void markCompletedTaskAttempt(String taskTracker, TaskAttemptID taskid) {
  Set<TaskAttemptID> marked = trackerToMarkedTasksMap.get(taskTracker);
  if (marked == null) {
    marked = new TreeSet<TaskAttemptID>();
    trackerToMarkedTasksMap.put(taskTracker, marked);
  }
  marked.add(taskid);
  LOG.debug("Marked '" + taskid + "' from '" + taskTracker + "'");
}
Mark a task for removal later . This function assumes that the JobTracker is locked on entry .
33,237
/**
 * Marks every no-longer-live attempt of the job's setup, map and reduce
 * tasks for later pruning. Assumes the JobTracker lock is held on entry.
 */
void markCompletedJob(JobInProgress job) {
  for (TaskInProgress tip : job.getTasks(TaskType.JOB_SETUP)) {
    // Setup attempts have no unclean-cleanup states to consider.
    markNonRunningAttempts(tip, false);
  }
  for (TaskInProgress tip : job.getTasks(TaskType.MAP)) {
    markNonRunningAttempts(tip, true);
  }
  for (TaskInProgress tip : job.getTasks(TaskType.REDUCE)) {
    markNonRunningAttempts(tip, true);
  }
}

/**
 * Marks each attempt of {@code tip} that is not still live. RUNNING,
 * COMMIT_PENDING and UNASSIGNED attempts are always skipped; when
 * {@code skipUnclean} is set (map/reduce tasks), FAILED_UNCLEAN and
 * KILLED_UNCLEAN attempts are skipped as well.
 */
private void markNonRunningAttempts(TaskInProgress tip, boolean skipUnclean) {
  for (TaskStatus taskStatus : tip.getTaskStatuses()) {
    TaskStatus.State state = taskStatus.getRunState();
    if (state == TaskStatus.State.RUNNING
        || state == TaskStatus.State.COMMIT_PENDING
        || state == TaskStatus.State.UNASSIGNED) {
      continue;
    }
    if (skipUnclean
        && (state == TaskStatus.State.FAILED_UNCLEAN
            || state == TaskStatus.State.KILLED_UNCLEAN)) {
      continue;
    }
    markCompletedTaskAttempt(taskStatus.getTaskTracker(),
                             taskStatus.getTaskID());
  }
}
Mark all non - running task attempts of the job for pruning . This function assumes that the JobTracker is locked on entry .
33,238
/**
 * Returns whether the host of the given tracker is blacklisted; false when
 * the tracker is unknown.
 */
synchronized public boolean isBlacklisted(String trackerID) {
  TaskTrackerStatus status = getTaskTrackerStatus(trackerID);
  return status != null && faultyTrackers.isBlacklisted(status.getHost());
}
Whether the tracker is blacklisted or not
33,239
/**
 * Registers a newly contacted tracker: queues it for expiry tracking,
 * resolves its host into the topology if unseen, adds it to the per-host
 * tracker set, and updates statistics/instrumentation.
 */
void addNewTracker ( TaskTracker taskTracker ) { TaskTrackerStatus status = taskTracker . getStatus ( ) ; trackerExpiryQueue . add ( status ) ; String hostname = status . getHost ( ) ; if ( getNode ( status . getTrackerName ( ) ) == null ) { resolveAndAddToTopology ( hostname ) ; } Set < TaskTracker > trackers = hostnameToTaskTracker . get ( hostname ) ; if ( trackers == null ) { trackers = Collections . synchronizedSet ( new HashSet < TaskTracker > ( ) ) ; hostnameToTaskTracker . put ( hostname , trackers ) ; } statistics . taskTrackerAdded ( status . getTrackerName ( ) ) ; getInstrumentation ( ) . addTrackers ( 1 ) ; LOG . info ( "Adding tracker " + status . getTrackerName ( ) + " to host " + hostname ) ; trackers . add ( taskTracker ) ; }
Adds a new node to the jobtracker . It involves adding it to the expiry thread and adding it for resolution
33,240
/**
 * Computes the next heartbeat interval from cluster size: the scaled
 * number of whole NUM_HEARTBEATS_IN_SECOND batches of trackers, in
 * milliseconds, bounded below by HEARTBEAT_INTERVAL_MIN.
 */
public int getNextHeartbeatInterval() {
  int clusterSize = getClusterStatus().getTaskTrackers();
  double batches = Math.ceil((double) clusterSize / NUM_HEARTBEATS_IN_SECOND);
  int scaledMs = (int) (1000 * HEARTBEATS_SCALING_FACTOR * batches);
  return Math.max(scaledMs, HEARTBEAT_INTERVAL_MIN);
}
Calculates next heartbeat interval using cluster size . Heartbeat interval is incremented by 1 second for every 100 nodes by default .
33,241
/**
 * Returns true when no hosts list was configured (empty list) or when the
 * tracker's host appears in it.
 */
private boolean inHostsList(TaskTrackerStatus status) {
  Set<String> hosts = hostsReader.getHosts();
  if (hosts.isEmpty()) {
    return true;
  }
  return hosts.contains(status.getHost());
}
Return if the specified tasktracker is in the hosts list if one was configured . If none was configured then this returns true .
33,242
/**
 * Returns true when the tracker's host appears in the configured exclude
 * list.
 */
private boolean inExcludedHostsList(TaskTrackerStatus status) {
  return hostsReader.getExcludedHosts().contains(status.getHost());
}
Return if the specified tasktracker is in the exclude list .
33,243
/**
 * Adds the given slot count to the reservation total for the task type.
 * This method assumes the caller holds the JobTracker lock.
 */
void incrementReservations(TaskType type, int slots) {
  if (type.equals(TaskType.MAP)) {
    reservedMapSlots += slots;
  } else if (type.equals(TaskType.REDUCE)) {
    reservedReduceSlots += slots;
  }
}
This method assumes the caller has JobTracker lock .
33,244
/**
 * Processes one tracker heartbeat under the taskTrackers and
 * trackerExpiryQueue locks: reconciles initial-contact vs already-known
 * state (re-registering or rejecting stale trackers), registers genuinely
 * new trackers, then applies the reported task statuses and node health.
 *
 * @return false if the heartbeat came from an unknown, non-initial
 *         tracker; true otherwise
 */
// An initialContact from a tracker we have already seen means the tracker
// restarted: it is treated as lost first, then re-added below.
synchronized boolean processHeartbeat ( TaskTrackerStatus trackerStatus , boolean initialContact ) { String trackerName = trackerStatus . getTrackerName ( ) ; synchronized ( taskTrackers ) { synchronized ( trackerExpiryQueue ) { boolean seenBefore = updateTaskTrackerStatus ( trackerName , trackerStatus ) ; TaskTracker taskTracker = getTaskTracker ( trackerName ) ; if ( initialContact ) { if ( seenBefore ) { LOG . warn ( "initialContact but seenBefore from Tracker : " + trackerName ) ; lostTaskTracker ( taskTracker ) ; } } else { if ( ! seenBefore ) { LOG . warn ( "Status from unknown Tracker : " + trackerName ) ; updateTaskTrackerStatus ( trackerName , null ) ; return false ; } } if ( initialContact ) { if ( isBlacklisted ( trackerName ) ) { faultyTrackers . incrBlackListedTrackers ( 1 ) ; } addNewTracker ( taskTracker ) ; } } } updateTaskStatuses ( trackerStatus ) ; updateNodeHealthStatus ( trackerStatus ) ; return true ; }
Process incoming heartbeat messages from the task trackers .
33,245
/**
 * Queues the job for cleanup on every known tracker by adding its id to
 * each tracker's pending-kill set.
 */
// NOTE(review): taskTrackers.keySet() is iterated without the taskTrackers
// lock that other methods take — presumably safe under the JobTracker
// lock; confirm.
private void addJobForCleanup ( JobID id ) { for ( String taskTracker : taskTrackers . keySet ( ) ) { LOG . debug ( "Marking job " + id + " for cleanup by tracker " + taskTracker ) ; synchronized ( trackerToJobsToCleanup ) { Set < JobID > jobsToKill = trackerToJobsToCleanup . get ( taskTracker ) ; if ( jobsToKill == null ) { jobsToKill = new HashSet < JobID > ( ) ; trackerToJobsToCleanup . put ( taskTracker , jobsToKill ) ; } jobsToKill . add ( id ) ; } } }
Add a job to cleanup for the tracker .
33,246
/**
 * Builds KillJobActions for every job awaiting cleanup on the given
 * tracker, consuming the tracker's pending set; returns null when there is
 * nothing to clean up.
 */
private List<TaskTrackerAction> getJobsForCleanup(String taskTracker) {
  Set<JobID> pending;
  synchronized (trackerToJobsToCleanup) {
    pending = trackerToJobsToCleanup.remove(taskTracker);
  }
  if (pending == null) {
    return null;
  }
  List<TaskTrackerAction> killList = new ArrayList<TaskTrackerAction>();
  for (JobID killJobId : pending) {
    killList.add(new KillJobAction(killJobId));
    LOG.debug(taskTracker + " -> KillJobAction: " + killJobId);
  }
  return killList;
}
A tracker wants to know if any job needs cleanup because the job completed .
33,247
/**
 * Hands out at most one setup or cleanup task for the tracker, preferring
 * job cleanup, then task cleanup, then job setup, and trying map-slot
 * tasks before reduce-slot tasks when slots of each kind are free.
 *
 * @return a singleton list with the chosen task, or null if none
 */
List < Task > getSetupAndCleanupTasks(TaskTrackerStatus taskTracker)
    throws IOException {
  int maxMapTasks = taskScheduler.getMaxSlots(taskTracker, TaskType.MAP);
  int maxReduceTasks = taskScheduler.getMaxSlots(taskTracker, TaskType.REDUCE);
  int numMaps = taskTracker.countOccupiedMapSlots();
  int numReduces = taskTracker.countOccupiedReduceSlots();
  int numTaskTrackers = getClusterStatus().getTaskTrackers();
  int numUniqueHosts = getNumberOfUniqueHosts();
  // Snapshot the job list so we do not hold the jobs lock while probing.
  List<JobInProgress> cachedJobs = new ArrayList<JobInProgress>();
  synchronized (jobs) {
    cachedJobs.addAll(jobs.values());
  }
  if (numMaps < maxMapTasks) {
    List<Task> t = obtainSetupOrCleanupTask(cachedJobs, taskTracker,
        numTaskTrackers, numUniqueHosts, true);
    if (t != null) {
      return t;
    }
  }
  if (numReduces < maxReduceTasks) {
    List<Task> t = obtainSetupOrCleanupTask(cachedJobs, taskTracker,
        numTaskTrackers, numUniqueHosts, false);
    if (t != null) {
      return t;
    }
  }
  return null;
}

/**
 * Probes every job for a job-cleanup, then a task-cleanup, then a
 * job-setup task for the given slot type; returns the first hit as a
 * singleton list, or null. (The map and reduce halves of the original
 * method were verbatim duplicates differing only in this boolean.)
 */
private List<Task> obtainSetupOrCleanupTask(List<JobInProgress> cachedJobs,
    TaskTrackerStatus taskTracker, int numTaskTrackers, int numUniqueHosts,
    boolean isMapSlot) throws IOException {
  for (JobInProgress job : cachedJobs) {
    Task t = job.obtainJobCleanupTask(taskTracker, numTaskTrackers,
        numUniqueHosts, isMapSlot);
    if (t != null) {
      return Collections.singletonList(t);
    }
  }
  for (JobInProgress job : cachedJobs) {
    Task t = job.obtainTaskCleanupTask(taskTracker, isMapSlot);
    if (t != null) {
      return Collections.singletonList(t);
    }
  }
  for (JobInProgress job : cachedJobs) {
    Task t = job.obtainJobSetupTask(taskTracker, numTaskTrackers,
        numUniqueHosts, isMapSlot);
    if (t != null) {
      return Collections.singletonList(t);
    }
  }
  return null;
}
returns cleanup tasks first then setup tasks .
33,248
/**
 * Allocates the next JobID for this tracker, records the submitting user
 * for the id, and logs the assignment.
 *
 * @return the newly allocated job id
 */
public JobID getNewJobId ( ) throws IOException { JobID id = new JobID ( getTrackerIdentifier ( ) , nextJobId . getAndIncrement ( ) ) ; UserGroupInformation ugi = UserGroupInformation . getCurrentUGI ( ) ; jobToUserMap . put ( id , ugi . getUserName ( ) ) ; LOG . info ( "Job id " + id + " assigned to user " + ugi . getUserName ( ) ) ; return id ; }
Allocates a new JobId string .
33,249
/**
 * Core job-submission step: registers the job in the jobs map, notifies
 * every JobInProgressListener (a listener's IOException is logged and the
 * remaining listeners still run), records instrumentation, and logs the
 * submission with the job name truncated to 64 characters.
 *
 * @return the job's current status
 */
// NOTE(review): assumes getJobName() is never null — a null name would NPE
// on length(); confirm JobConf guarantees a default.
protected synchronized JobStatus addJob ( JobID jobId , JobInProgress job ) { totalSubmissions ++ ; synchronized ( jobs ) { synchronized ( taskScheduler ) { jobs . put ( job . getProfile ( ) . getJobID ( ) , job ) ; for ( JobInProgressListener listener : jobInProgressListeners ) { try { listener . jobAdded ( job ) ; } catch ( IOException ioe ) { LOG . warn ( "Failed to add and so skipping the job : " + job . getJobID ( ) + ". Exception : " + ioe ) ; } } } } myInstrumentation . submitJob ( job . getJobConf ( ) , jobId ) ; String jobName = job . getJobConf ( ) . getJobName ( ) ; int jobNameLen = 64 ; if ( jobName . length ( ) > jobNameLen ) { jobName = jobName . substring ( 0 , jobNameLen ) ; } LOG . info ( "Job " + jobId + "(" + jobName + ") added successfully for user '" + job . getJobConf ( ) . getUser ( ) + "' to queue '" + job . getJobConf ( ) . getQueueName ( ) + "'" + ", source " + job . getJobConf ( ) . getJobSource ( ) ) ; return job . getStatus ( ) ; }
Adds a job to the jobtracker . Make sure that the checks are inplace before adding a job . This is the core job submission logic
33,250
/**
 * Verifies that the current user may perform the given queue operation
 * related to the job.
 */
private void checkAccess(JobInProgress job, QueueManager.QueueOperation oper)
    throws IOException {
  checkAccess(job, oper, UserGroupInformation.getCurrentUGI());
}
Checks if the current user can perform the given queue operation related to the job .
33,251
/**
 * Verifies that the passed user may perform the given operation on the
 * job's queue.
 *
 * @throws AccessControlException (an IOException) when access is denied,
 *         with a hint to run "hadoop queue -showacls"
 */
private void checkAccess ( JobInProgress job , QueueManager . QueueOperation oper , UserGroupInformation ugi ) throws IOException { String queue = job . getProfile ( ) . getQueueName ( ) ; if ( ! queueManager . hasAccess ( queue , job , oper , ugi ) ) { throw new AccessControlException ( "User " + ugi . getUserName ( ) + " cannot perform " + "operation " + oper + " on queue " + queue + ".\n Please run \"hadoop queue -showacls\" " + "command to find the queues you have access" + " to ." ) ; } }
use the passed ugi for checking the access
33,252
/**
 * Fails the given job and, when its run state actually changes, informs
 * the registered listeners. Other framework components should use this
 * entry point to fail a job.
 */
public synchronized void failJob(JobInProgress job) {
  if (job == null) {
    LOG.info("Fail on null job is not valid");
    return;
  }
  // Snapshot the status before and after so a state transition can be
  // detected and broadcast.
  JobStatus before = (JobStatus) job.getStatus().clone();
  LOG.info("Failing job " + job.getJobID());
  job.fail();
  JobStatus after = (JobStatus) job.getStatus().clone();
  if (before.getRunState() != after.getRunState()) {
    updateJobInProgressListeners(new JobStatusChangeEvent(
        job, EventType.RUN_STATE_CHANGED, before, after));
  }
}
Fail a job and inform the listeners . Other components in the framework should use this to fail a job .
33,253
/**
 * Sets the priority of a job, after verifying that the caller is allowed
 * to administer jobs in its queue.
 *
 * @param jobid    job to modify
 * @param priority textual name of a {@code JobPriority} value
 */
public synchronized void setJobPriority(JobID jobid, String priority)
    throws IOException {
  JobInProgress job = jobs.get(jobid);
  if (job == null) {
    LOG.info("setJobPriority(): JobId " + jobid.toString()
        + " is not a valid job");
    return;
  }
  checkAccess(job, QueueManager.QueueOperation.ADMINISTER_JOBS);
  // Parse the textual priority and delegate to the typed overload.
  setJobPriority(jobid, JobPriority.valueOf(priority));
}
Set the priority of a job
33,254
/**
 * Returns all TaskStatuses for the given tip, or an empty array when the
 * tip is unknown.
 */
TaskStatus[] getTaskStatuses(TaskID tipid) {
  TaskInProgress tip = getTip(tipid);
  if (tip == null) {
    return new TaskStatus[0];
  }
  return tip.getTaskStatuses();
}
Get all the TaskStatuses from the tipid .
33,255
/**
 * Returns the TaskStatus for a particular task attempt, or null when the
 * owning tip is unknown.
 */
TaskStatus getTaskStatus(TaskAttemptID taskid) {
  TaskInProgress tip = getTip(taskid.getTaskID());
  if (tip == null) {
    return null;
  }
  return tip.getTaskStatus(taskid);
}
Returns the TaskStatus for a particular taskid .
33,256
/**
 * Returns the counters for the specified task-in-progress, or null when
 * the tip is unknown.
 */
Counters getTipCounters(TaskID tipid) {
  TaskInProgress tip = getTip(tipid);
  if (tip == null) {
    return null;
  }
  return tip.getCounters();
}
Returns the counters for the specified task in progress .
33,257
/**
 * Changes the run-time priority of the given job and notifies listeners
 * of the priority change.
 */
synchronized void setJobPriority(JobID jobId, JobPriority priority) {
  JobInProgress job = jobs.get(jobId);
  if (job == null) {
    LOG.warn("Trying to change the priority of an unknown job: " + jobId);
    return;
  }
  synchronized (taskScheduler) {
    // Snapshot before/after so the change event carries both statuses.
    JobStatus before = (JobStatus) job.getStatus().clone();
    job.setPriority(priority);
    JobStatus after = (JobStatus) job.getStatus().clone();
    updateJobInProgressListeners(new JobStatusChangeEvent(
        job, EventType.PRIORITY_CHANGED, before, after));
  }
}
Change the run - time priority of the given job .
33,258
/**
 * Accepts and processes a TaskTracker status report. The tracker may be
 * previously known or brand new; all tracker-level structures are assumed
 * to be updated already. This processes the contained task reports and
 * updates any jobs affected by them.
 *
 * @param status the heartbeat status sent by the tracker
 */
void updateTaskStatuses(TaskTrackerStatus status) {
  String trackerName = status.getTrackerName();
  for (TaskStatus report : status.getTaskReports()) {
    report.setTaskTracker(trackerName);
    TaskAttemptID taskId = report.getTaskID();
    // The attempt has been assigned, so stop tracking it for launch expiry.
    if (report.getRunState() != TaskStatus.State.UNASSIGNED) {
      expireLaunchingTasks.removeTask(taskId);
    }
    JobInProgress job = getJob(taskId.getJobID());
    if (job == null) {
      // Unknown job: remember to clean up its remnants on this tracker.
      synchronized (trackerToJobsToCleanup) {
        Set<JobID> jobs = trackerToJobsToCleanup.get(trackerName);
        if (jobs == null) {
          jobs = new HashSet<JobID>();
          trackerToJobsToCleanup.put(trackerName, jobs);
        }
        jobs.add(taskId.getJobID());
      }
      continue;
    }
    if (!job.inited()) {
      // Job not initialized yet: schedule the stray attempt for cleanup.
      synchronized (trackerToTasksToCleanup) {
        Set<TaskAttemptID> tasks = trackerToTasksToCleanup.get(trackerName);
        if (tasks == null) {
          tasks = new HashSet<TaskAttemptID>();
          trackerToTasksToCleanup.put(trackerName, tasks);
        }
        tasks.add(taskId);
      }
      continue;
    }
    TaskInProgress tip = taskidToTIPMap.get(taskId);
    if (tip != null) {
      // Clone statuses around the update so a job run-state transition can
      // be detected and broadcast to the listeners.
      JobStatus prevStatus = (JobStatus) job.getStatus().clone();
      job.updateTaskStatus(tip, (TaskStatus) report.clone());
      JobStatus newStatus = (JobStatus) job.getStatus().clone();
      if (prevStatus.getRunState() != newStatus.getRunState()) {
        JobStatusChangeEvent event = new JobStatusChangeEvent(job,
            EventType.RUN_STATE_CHANGED, prevStatus, newStatus);
        updateJobInProgressListeners(event);
      }
    } else {
      LOG.info("Serious problem. While updating status, cannot find taskid "
          + report.getTaskID());
    }
    // Forward shuffle fetch-failure notifications to the owning map's job.
    List<TaskAttemptID> failedFetchMaps = report.getFetchFailedMaps();
    if (failedFetchMaps != null) {
      TaskAttemptID reportingAttempt = report.getTaskID();
      for (TaskAttemptID mapTaskId : failedFetchMaps) {
        TaskInProgress failedFetchMap = taskidToTIPMap.get(mapTaskId);
        if (failedFetchMap != null) {
          String failedFetchTrackerName = getAssignedTracker(mapTaskId);
          if (failedFetchTrackerName == null) {
            failedFetchTrackerName = "Lost task tracker";
          }
          ((JobInProgress) failedFetchMap.getJob()).fetchFailureNotification(
              reportingAttempt, failedFetchMap, mapTaskId,
              failedFetchTrackerName);
        }
      }
    }
  }
}
Accept and process a new TaskTracker profile . We might have known about the TaskTracker previously or it might be brand - new . All task - tracker structures have already been updated . Just process the contained tasks and any jobs that might be affected .
33,259
/**
 * Handles loss of a task tracker. All tracker-level structures are
 * assumed updated already; this fails or re-marks the tasks the tracker
 * was running and updates any jobs affected by the loss.
 */
void lostTaskTracker(TaskTracker taskTracker) {
  String trackerName = taskTracker.getTrackerName();
  LOG.info("Lost tracker '" + trackerName + "'");
  // Drop any pending cleanup work destined for this tracker.
  synchronized (trackerToJobsToCleanup) {
    trackerToJobsToCleanup.remove(trackerName);
  }
  synchronized (trackerToTasksToCleanup) {
    trackerToTasksToCleanup.remove(trackerName);
  }
  Set<TaskAttemptIDWithTip> lostTasks = trackerToTaskMap.get(trackerName);
  trackerToTaskMap.remove(trackerName);
  if (lostTasks != null) {
    Set<JobInProgress> jobsWithFailures = new HashSet<JobInProgress>();
    for (TaskAttemptIDWithTip oneTask : lostTasks) {
      TaskAttemptID taskId = oneTask.attemptId;
      TaskInProgress tip = oneTask.tip;
      JobInProgress job = (JobInProgress) tip.getJob();
      // Incomplete tasks must be re-run. Completed maps are also re-run
      // when the job has reduces (their outputs lived on the lost tracker).
      if (!tip.isComplete()
          || (tip.isMapTask() && !tip.isJobSetupTask()
              && job.desiredReduces() != 0)) {
        if (job.getStatus().getRunState() == JobStatus.RUNNING
            || job.getStatus().getRunState() == JobStatus.PREP) {
          // Running attempts (other than setup/cleanup) get the unclean
          // kill state so their partial output gets cleaned up.
          TaskStatus.State killState = (tip.isRunningTask(taskId)
              && !tip.isJobSetupTask() && !tip.isJobCleanupTask())
              ? TaskStatus.State.KILLED_UNCLEAN
              : TaskStatus.State.KILLED;
          job.failedTask(tip, taskId,
              ("Lost task tracker: " + trackerName + " at " + new Date()),
              (tip.isMapTask() ? TaskStatus.Phase.MAP
                  : TaskStatus.Phase.REDUCE),
              killState, trackerName);
          jobsWithFailures.add(job);
        }
      } else {
        // Completed and not needing a re-run: just mark for removal.
        markCompletedTaskAttempt(trackerName, taskId);
      }
    }
    // Charge one tracker-level failure per affected job.
    for (JobInProgress job : jobsWithFailures) {
      String reason = "Tracker went down";
      job.addTrackerTaskFailure(trackerName, taskTracker, reason);
    }
    taskTracker.cancelAllReservations();
    removeMarkedTasks(trackerName);
  }
}
We lost the task tracker! All task - tracker structures have already been updated . Just process the contained tasks and any jobs that might be affected .
33,260
/**
 * Removes a tracker from the system: processes its lost tasks, adjusts
 * blacklist accounting, clears its status, and updates statistics and
 * instrumentation.
 *
 * Fix: removed the dead local {@code TaskTrackerStatus status}, which was
 * assigned but never used.
 */
private void removeTracker(TaskTracker tracker) {
  String trackerName = tracker.getTrackerName();
  // Fail/clean up everything that was running on the tracker first.
  lostTaskTracker(tracker);
  // If the tracker was blacklisted, drop it from the blacklist count.
  if (isBlacklisted(trackerName)) {
    faultyTrackers.decrBlackListedTrackers(1);
  }
  updateTaskTrackerStatus(trackerName, null);
  statistics.taskTrackerRemoved(trackerName);
  getInstrumentation().decTrackers(1);
}
Remove a tracker from the system
33,261
/**
 * Starts the JobTracker process. Used only for debugging; as a rule the
 * JobTracker should be run as part of the DFS Namenode process.
 *
 * Supported arguments:
 *   (none)              start with the default configuration
 *   -instance <0|1>     start one of two configured instances
 *   -dumpConfiguration  print the configuration and exit
 */
public static void main(String argv[])
    throws IOException, InterruptedException {
  StringUtils.startupShutdownMessage(JobTracker.class, argv, LOG);
  try {
    if (argv.length == 0) {
      JobTracker tracker = startTracker(new JobConf());
      tracker.offerService();
      return;
    }
    if ("-instance".equals(argv[0]) && argv.length == 2) {
      int instance = Integer.parseInt(argv[1]);
      // Only instance ids 0 and 1 are supported.
      if (instance == 0 || instance == 1) {
        JobConf conf = new JobConf();
        JobConf.overrideConfiguration(conf, instance);
        JobTracker tracker = startTracker(conf);
        tracker.offerService();
        return;
      }
    }
    if ("-dumpConfiguration".equals(argv[0]) && argv.length == 1) {
      dumpConfiguration(new PrintWriter(System.out));
      return;
    }
    // Unrecognized arguments: print usage and exit with failure.
    System.out.println("usage: JobTracker [-dumpConfiguration]");
    System.out.println(" JobTracker [-instance <0|1>]");
    System.exit(-1);
  } catch (Throwable e) {
    LOG.fatal(StringUtils.stringifyException(e));
    System.exit(-1);
  }
}
Start the JobTracker process . This is used only for debugging . As a rule JobTracker should be run as part of the DFS Namenode process .
33,262
/**
 * Dumps the JobTracker and queue configuration properties to the writer,
 * each section followed by a newline.
 */
private static void dumpConfiguration(Writer writer) throws IOException {
  final String newline = "\n";
  Configuration.dumpConfiguration(new JobConf(), writer);
  writer.write(newline);
  QueueManager.dumpConfiguration(writer);
  writer.write(newline);
}
Dumps the configuration properties in Json format
33,263
/**
 * Validates the job's per-task memory requirements against the cluster
 * limits and throws an IOException when they are invalid. No-op when the
 * JT has no per-task memory configuration.
 */
private void checkMemoryRequirements(JobInProgress job) throws IOException {
  if (!perTaskMemoryConfigurationSetOnJT()) {
    LOG.debug("Per-Task memory configuration is not set on JT. "
        + "Not checking the job for invalid memory requirements.");
    return;
  }
  boolean invalidJob = false;
  String msg = "";
  long maxMemForMapTask = job.getMemoryForMapTask();
  long maxMemForReduceTask = job.getMemoryForReduceTask();
  // Jobs must specify memory for both map and reduce tasks.
  if (maxMemForMapTask == JobConf.DISABLED_MEMORY_LIMIT
      || maxMemForReduceTask == JobConf.DISABLED_MEMORY_LIMIT) {
    invalidJob = true;
    msg = "Invalid job requirements.";
  }
  // NOTE(review): when both checks fail, only this second message is kept.
  if (maxMemForMapTask > limitMaxMemForMapTasks
      || maxMemForReduceTask > limitMaxMemForReduceTasks) {
    invalidJob = true;
    msg = "Exceeds the cluster's max-memory-limit.";
  }
  if (invalidJob) {
    StringBuilder jobStr = new StringBuilder()
        .append(job.getJobID().toString()).append("(")
        .append(maxMemForMapTask).append(" memForMapTasks ")
        .append(maxMemForReduceTask).append(" memForReduceTasks): ");
    LOG.warn(jobStr.toString() + msg);
    throw new IOException(jobStr.toString() + msg);
  }
}
Check whether the job has invalid memory requirements and throw an IOException if it does .
33,264
/**
 * Reconciles totalMapTaskCapacity and totalReduceTaskCapacity with the
 * tracker's current slot counts. The change may originate from the
 * TaskScheduler or from the TaskTrackerStatus itself.
 */
private void updateTotalTaskCapacity(TaskTrackerStatus status) {
  int mapSlots = taskScheduler.getMaxSlots(status, TaskType.MAP);
  String trackerName = status.getTrackerName();
  Integer oldMapSlots = trackerNameToMapSlots.get(trackerName);
  if (oldMapSlots == null) {
    // A previously unknown tracker contributed zero slots.
    oldMapSlots = 0;
  }
  int delta = mapSlots - oldMapSlots;
  if (delta != 0) {
    totalMapTaskCapacity += delta;
    trackerNameToMapSlots.put(trackerName, mapSlots);
    LOG.info("Changing map slot count due to " + trackerName + " from "
        + oldMapSlots + " to " + mapSlots + ", totalMap = "
        + totalMapTaskCapacity);
  }
  // Same bookkeeping for reduce slots.
  int reduceSlots = taskScheduler.getMaxSlots(status, TaskType.REDUCE);
  Integer oldReduceSlots = trackerNameToReduceSlots.get(trackerName);
  if (oldReduceSlots == null) {
    oldReduceSlots = 0;
  }
  delta = reduceSlots - oldReduceSlots;
  if (delta != 0) {
    totalReduceTaskCapacity += delta;
    trackerNameToReduceSlots.put(trackerName, reduceSlots);
    LOG.info("Changing reduce slot count due to " + trackerName + " from "
        + oldReduceSlots + " to " + reduceSlots + ", totalReduce = "
        + totalReduceTaskCapacity);
  }
}
Update totalMapTaskCapacity and totalReduceTaskCapacity to resolve the number of slots changed in a tasktracker . The change could be from TaskScheduler or TaskTrackerStatus
33,265
/**
 * Subtracts a departing tasktracker's slots from totalMapTaskCapacity and
 * totalReduceTaskCapacity.
 */
private void removeTaskTrackerCapacity(TaskTrackerStatus status) {
  String trackerName = status.getTrackerName();
  Integer removedMap = trackerNameToMapSlots.remove(trackerName);
  int mapSlots = (removedMap == null) ? 0 : removedMap;
  totalMapTaskCapacity -= mapSlots;
  Integer removedReduce = trackerNameToReduceSlots.remove(trackerName);
  int reduceSlots = (removedReduce == null) ? 0 : removedReduce;
  totalReduceTaskCapacity -= reduceSlots;
  LOG.info("Removing " + mapSlots + " map slots, " + reduceSlots
      + " reduce slots due to " + trackerName + ", totalMap = "
      + totalMapTaskCapacity + ", totalReduce = "
      + totalReduceTaskCapacity);
}
Update totalMapTaskCapacity and totalReduceTaskCapacity for removing a tasktracker
33,266
/**
 * Recovers missing edit-log segments for the given sync task by fetching
 * manifests from the other journal nodes and copying segments from them,
 * stopping once no valid segments are missing.
 */
void recoverSegments(SyncTask task) throws IOException {
  if (!prepareRecovery(task)) {
    return;
  }
  for (InetSocketAddress jn : journalNodes) {
    // Skip ourselves; recovery only pulls from remote journal nodes.
    if (isLocalIpAddress(jn.getAddress())
        && jn.getPort() == journalNode.getPort()) {
      continue;
    }
    try {
      List<EditLogFile> remoteLogFiles =
          getManifest(jn, task.journal, task.recoveryStartTxid);
      for (EditLogFile relf : remoteLogFiles) {
        recoverSegment(jn, relf, task);
      }
      // Done once every required segment has been recovered.
      // (logMsg appears to be a log-prefix field — not visible here.)
      if (!task.hasMissingValidSegments()) {
        LOG.info(logMsg + "recovery finished.");
        break;
      }
    } catch (Exception e) {
      // A failing journal node is not fatal; try the next one.
      LOG.error(logMsg + "error", e);
      continue;
    }
  }
}
Recovers missing segments from the other journal nodes .
33,267
/**
 * Fetches the edit-log manifest from a single journal node over HTTP and
 * parses it into a list of edit log files.
 */
private List<EditLogFile> getManifest(InetSocketAddress jn, Journal journal,
    long minTxId) throws IOException {
  URL manifestUrl = new URL("http", jn.getAddress().getHostAddress(),
      jn.getPort(),
      GetJournalManifestServlet.buildPath(journal.getJournalId(), minTxId,
          journal.getJournalStorage()));
  String manifest = DFSUtil.getHTMLContentWithTimeout(manifestUrl,
      httpConnectReadTimeoutMs, httpConnectReadTimeoutMs);
  return convertJsonToListManifest(manifest);
}
Fetch manifest from a single given journal node over http .
33,268
/**
 * Parses the JSON manifest string into a list of edit log files; a null
 * or empty string yields an empty list.
 */
public static List<EditLogFile> convertJsonToListManifest(String json)
    throws IOException {
  if (json == null || json.isEmpty()) {
    return new ArrayList<EditLogFile>();
  }
  TypeReference<List<String>> type = new TypeReference<List<String>>() { };
  List<String> descriptions = mapper.readValue(json, type);
  List<EditLogFile> logFiles =
      new ArrayList<EditLogFile>(descriptions.size());
  for (String description : descriptions) {
    logFiles.add(new EditLogFile(description));
  }
  return logFiles;
}
Get the map corresponding to the JSON string .
33,269
/**
 * Returns true when the address is a wildcard/loopback address or is
 * bound to one of this machine's network interfaces.
 */
private boolean isLocalIpAddress(InetAddress addr) {
  if (addr.isAnyLocalAddress() || addr.isLoopbackAddress()) {
    return true;
  }
  try {
    // An address is local iff some local interface owns it.
    return NetworkInterface.getByInetAddress(addr) != null;
  } catch (SocketException e) {
    return false;
  }
}
Checks if the address is local .
33,270
/**
 * Re-reads the queue ACLs from mapred-queue-acls.xml and swaps in the new
 * map. On any parse failure the existing ACLs are retained and an
 * IOException is thrown.
 */
synchronized void refreshAcls(Configuration conf) throws IOException {
  try {
    aclsMap = getQueueAcls(conf);
  } catch (Throwable t) {
    String exceptionString = StringUtils.stringifyException(t);
    LOG.warn("Queue ACLs could not be refreshed because there was an "
        + "exception in parsing the configuration: " + exceptionString
        + ". Existing ACLs are retained.");
    throw new IOException(exceptionString);
  }
}
Refresh the acls for the configured queues in the system by reading it from mapred - queue - acls . xml .
33,271
/**
 * Returns a cached, still-open socket to the given address, or null when
 * none is available. Any closed sockets encountered are discarded.
 */
public Socket get(SocketAddress remote) {
  synchronized (multimap) {
    List<Socket> sockList = multimap.get(remote);
    if (sockList != null) {
      for (Iterator<Socket> it = sockList.iterator(); it.hasNext(); ) {
        Socket candidate = it.next();
        // Remove the candidate either way; only hand back open sockets.
        it.remove();
        if (!candidate.isClosed()) {
          return candidate;
        }
      }
    }
  }
  return null;
}
Get a cached socket to the given address
33,272
/**
 * Returns an unused socket to the cache. When the cache is at capacity
 * the oldest entry is evicted and closed; unconnected sockets are closed
 * instead of cached.
 */
public void put(Socket sock) {
  Preconditions.checkNotNull(sock);
  SocketAddress remoteAddr = sock.getRemoteSocketAddress();
  if (remoteAddr == null) {
    LOG.warn("Cannot cache (unconnected) socket with no remote address: "
        + sock);
    IOUtils.closeSocket(sock);
    return;
  }
  Socket evicted = null;
  synchronized (multimap) {
    if (capacity == multimap.size()) {
      evicted = evictOldest();
    }
    multimap.put(remoteAddr, sock);
  }
  // Close outside the lock so slow closes don't block other cache users.
  if (evicted != null) {
    IOUtils.closeSocket(evicted);
  }
}
Give an unused socket to the cache .
33,273
/**
 * Removes and returns the oldest cached socket.
 *
 * @throws IllegalArgumentException if the cache is empty
 */
private Socket evictOldest() {
  Iterator<Entry<SocketAddress, Socket>> it = multimap.entries().iterator();
  if (!it.hasNext()) {
    throw new IllegalArgumentException("Cannot evict from empty cache!");
  }
  Socket oldest = it.next().getValue();
  it.remove();
  return oldest;
}
Evict the oldest entry in the cache .
33,274
/**
 * Empties the cache, closing every cached socket. Closing happens outside
 * the lock.
 */
public void clear() {
  List<Socket> toClose = new LinkedList<Socket>();
  synchronized (multimap) {
    toClose.addAll(multimap.values());
    multimap.clear();
  }
  for (Socket sock : toClose) {
    IOUtils.closeSocket(sock);
  }
}
Empty the cache and close all sockets .
33,275
/**
 * Atomically writes the given value, followed by a newline, to the file.
 * On any failure before the stream is closed, the partial write is
 * aborted instead of being committed.
 */
public static void writeFile(File file, long val) throws IOException {
  AtomicFileOutputStream fos = new AtomicFileOutputStream(file);
  boolean committed = false;
  try {
    fos.write(String.valueOf(val).getBytes(Charsets.UTF_8));
    fos.write('\n');
    fos.close();
    committed = true;
  } finally {
    if (!committed) {
      fos.abort();
    }
  }
}
Atomically write the given value to the given file including fsyncing .
33,276
/**
 * Normalizes a network-location path: null or empty becomes ROOT, a
 * single trailing separator is stripped, and a path that does not start
 * with the separator is rejected.
 */
static public String normalize(String path) {
  if (path == null || path.length() == 0) {
    return ROOT;
  }
  if (path.charAt(0) != PATH_SEPARATOR) {
    throw new IllegalArgumentException(
        "Network Location path does not start with "
        + PATH_SEPARATOR_STR + ": " + path);
  }
  int len = path.length();
  // Strip a trailing separator, if present.
  return (path.charAt(len - 1) == PATH_SEPARATOR)
      ? path.substring(0, len - 1)
      : path;
}
Normalize a path
33,277
/**
 * Restores ClusterManager state after a restart that happened while in
 * Safe Mode, by reading back the previously persisted JSON state. The
 * field order read here must match the order written by persistState().
 *
 * @param hostsReader reader for the configured hosts files
 * @throws IOException if the state file cannot be read or parsed
 */
private void recoverClusterManagerFromDisk(HostsFileReader hostsReader)
    throws IOException {
  LOG.info("Restoring state from "
      + new java.io.File(conf.getCMStateFile()).getAbsolutePath());
  // Recovery always runs in Safe Mode.
  safeMode = true;
  LOG.info("Safe mode is now: " + (this.safeMode ? "ON" : "OFF"));
  CoronaSerializer coronaSerializer = new CoronaSerializer(conf);
  coronaSerializer.readStartObjectToken("ClusterManager");
  coronaSerializer.readField("startTime");
  startTime = coronaSerializer.readValueAs(Long.class);
  coronaSerializer.readField("nodeManager");
  nodeManager = new NodeManager(this, hostsReader, coronaSerializer);
  nodeManager.setConf(conf);
  coronaSerializer.readField("sessionManager");
  sessionManager = new SessionManager(this, coronaSerializer);
  coronaSerializer.readField("sessionNotifier");
  sessionNotifier =
      new SessionNotifier(sessionManager, this, metrics, coronaSerializer);
  coronaSerializer.readEndObjectToken("ClusterManager");
  lastRestartTime = clock.getTime();
}
This method is used when the ClusterManager is restarting after going down while in Safe Mode . It starts the process of recovering the original CM state by reading back the state in JSON form .
33,278
/**
 * Builds the set of legal resource types from the configured
 * cpu-to-resource partitioning, then freezes it as unmodifiable.
 */
protected void initLegalTypes() {
  Map<Integer, Map<ResourceType, Integer>> cpuToResourcePartitioning =
      conf.getCpuToResourcePartitioning();
  for (Map<ResourceType, Integer> partition :
      cpuToResourcePartitioning.values()) {
    legalTypeSet.addAll(partition.keySet());
  }
  legalTypeSet = Collections.unmodifiableSet(legalTypeSet);
}
Prepare the legal types allowed based on the resources available
33,279
/**
 * Returns true only when every resource request in the list asks for a
 * legal resource type.
 */
protected boolean checkResourceRequestType(
    List<ResourceRequest> requestList) {
  for (ResourceRequest request : requestList) {
    if (legalTypeSet.contains(request.type)) {
      continue;
    }
    return false;
  }
  return true;
}
Check all the resource requests and ensure that they are legal .
33,280
/**
 * Counts the requested resources per type and fails the session, by
 * throwing InvalidSessionHandle, when any type exceeds its pool maximum.
 * No-op for pools that do not enforce request maximums.
 */
protected void checkResourceRequestLimit(List<ResourceRequest> requestList,
    String handle) throws InvalidSessionHandle {
  ConfigManager configManager = getScheduler().getConfigManager();
  Session session = sessionManager.getSession(handle);
  PoolInfo poolInfo = session.getPoolInfo();
  if (!configManager.useRequestMax(poolInfo)) {
    return;
  }
  // Tally the requests by resource type.
  ResourceTypeCounter resourceTypeCounter = new ResourceTypeCounter();
  for (ResourceRequest req : requestList) {
    resourceTypeCounter.incr(req.type);
  }
  for (ResourceType resourceType : ResourceType.values()) {
    if (configManager.getPoolMaximum(poolInfo, resourceType)
        < resourceTypeCounter.getCount(resourceType)) {
      String failureMessage = "Session " + handle + " requested "
          + resourceTypeCounter.getCount(resourceType)
          + " resources for resource type " + resourceType
          + " but was only allowed "
          + configManager.getPoolMaximum(poolInfo, resourceType) + ", "
          + "so failing the job";
      LOG.error(failureMessage);
      throw new InvalidSessionHandle(failureMessage);
    }
  }
}
Count the resources requested and fail the job if they are above the limit
33,281
/**
 * Sets the Safe Mode flag on this ClusterManager and on the
 * ProxyJobTracker. Returns false when propagating the flag to the
 * ProxyJobTracker fails; in that case run coronaadmin with the
 * -forceSetSafeModeOnPJT or -forceUnsetSafeModeOnPJT options.
 */
public synchronized boolean setSafeMode(boolean safeMode) {
  if (safeMode == false) {
    // Leaving safe mode: reset heartbeats so sessions and nodes are not
    // immediately expired for the time spent in safe mode.
    LOG.info("Resetting the heartbeat times for all sessions");
    sessionManager.resetSessionsLastHeartbeatTime();
    LOG.info("Resetting the heartbeat times for all nodes");
    nodeManager.resetNodesLastHeartbeatTime();
    this.safeMode = false;
  }
  try {
    ClusterManagerAvailabilityChecker.getPJTClient(conf)
        .setClusterManagerSafeModeFlag(safeMode);
  } catch (IOException e) {
    LOG.info("Exception while setting the safe mode flag in ProxyJobTracker: "
        + e.getMessage());
    return false;
  } catch (TException e) {
    LOG.info("Exception while setting the safe mode flag in ProxyJobTracker: "
        + e.getMessage());
    return false;
  }
  this.safeMode = safeMode;
  LOG.info("Flag successfully set in ProxyJobTracker");
  LOG.info("Safe mode is now: " + (this.safeMode ? "ON" : "OFF"));
  return true;
}
Sets the Safe Mode flag on the Cluster Manager and on the ProxyJobTracker . If we fail to set the flag on the ProxyJobTracker return false which signals that setting the flag on the ProxyJobTracker failed . In that case we should run coronaadmin with the - forceSetSafeModeOnPJT or - forceUnsetSafeModeOnPJT options .
33,282
/**
 * Persists the ClusterManager state to disk as JSON. Only legal while in
 * Safe Mode; the field order written here must stay in sync with
 * recoverClusterManagerFromDisk().
 *
 * @return true when the state was written successfully
 */
public boolean persistState() {
  if (!safeMode) {
    LOG.info(
        "Cannot persist state because ClusterManager is not in Safe Mode");
    return false;
  }
  try {
    JsonGenerator jsonGenerator = CoronaSerializer.createJsonGenerator(conf);
    jsonGenerator.writeStartObject();
    jsonGenerator.writeFieldName("startTime");
    jsonGenerator.writeNumber(startTime);
    jsonGenerator.writeFieldName("nodeManager");
    nodeManager.write(jsonGenerator);
    jsonGenerator.writeFieldName("sessionManager");
    sessionManager.write(jsonGenerator);
    jsonGenerator.writeFieldName("sessionNotifier");
    sessionNotifier.write(jsonGenerator);
    jsonGenerator.writeEndObject();
    jsonGenerator.close();
  } catch (IOException e) {
    LOG.info("Could not persist the state: ", e);
    return false;
  }
  return true;
}
This function saves the state of the ClusterManager to disk .
33,283
/**
 * Internal callback telling the cluster manager that a particular node
 * seems dysfunctional and must be removed from the cluster.
 */
public void nodeTimeout(String nodeName) {
  if (nodeRestarter != null) {
    nodeRestarter.delete(nodeName);
  }
  // Capture the node's sessions BEFORE deleting it — deletion drops the
  // node's bookkeeping.
  Set<String> sessions = nodeManager.getNodeSessions(nodeName);
  Set<ClusterNode.GrantId> grantsToRevoke = nodeManager.deleteNode(nodeName);
  if (grantsToRevoke == null) {
    // Node was not known; nothing to revoke or notify.
    return;
  }
  handleRevokedGrants(nodeName, grantsToRevoke);
  handleDeadNode(nodeName, sessions);
  scheduler.notifyScheduler();
}
This is an internal api called to tell the cluster manager that a a particular node seems dysfunctional and that it should be removed from the cluster .
33,284
/**
 * Internal callback telling the cluster manager that a particular
 * resource type is no longer available on a node. Revokes the affected
 * grants and notifies only the sessions that use that resource type.
 */
public void nodeAppRemoved(String nodeName, ResourceType type) {
  Set<String> sessions = nodeManager.getNodeSessions(nodeName);
  Set<ClusterNode.GrantId> grantsToRevoke =
      nodeManager.deleteAppFromNode(nodeName, type);
  if (grantsToRevoke == null) {
    return;
  }
  // Restrict the dead-node notifications to sessions using this type.
  Set<String> affectedSessions = new HashSet<String>();
  for (String sessionHandle : sessions) {
    try {
      if (sessionManager.getSession(sessionHandle).getTypes()
          .contains(type)) {
        affectedSessions.add(sessionHandle);
      }
    } catch (InvalidSessionHandle ex) {
      LOG.warn("Found invalid session: " + sessionHandle
          + " while timing out node: " + nodeName);
    }
  }
  handleDeadNode(nodeName, affectedSessions);
  handleRevokedGrants(nodeName, grantsToRevoke);
  scheduler.notifyScheduler();
}
This is an internal api called to tell the cluster manager that a particular type of resource is no longer available on a node .
33,285
/**
 * Revokes, session by session, the grants that were removed from a node.
 * Invalid sessions are logged and skipped.
 */
private void handleRevokedGrants(String nodeName,
    Set<ClusterNode.GrantId> grantsToRevoke) {
  for (ClusterNode.GrantId grantId : grantsToRevoke) {
    String sessionHandle = grantId.getSessionId();
    try {
      sessionManager.revokeResource(sessionHandle,
          Collections.singletonList(grantId.getRequestId()));
    } catch (InvalidSessionHandle e) {
      LOG.warn("Found invalid session: " + sessionHandle
          + " while timing out node: " + nodeName);
    }
  }
}
Process the grants removed from a node .
33,286
/** Notifies every listed session that the given node is dead. */
private void handleDeadNode(String nodeName, Set<String> sessions) {
  LOG.info("Notify sessions: " + sessions + " about dead node " + nodeName);
  for (String sessionHandle : sessions) {
    sessionNotifier.notifyDeadNode(sessionHandle, nodeName);
  }
}
All the sessions that had grants on this node should get notified
33,287
/**
 * Renames the current thread to reflect the client address and the given
 * status. Use only after this receiver has started on its own thread,
 * i.e. not from the constructor.
 */
private void updateCurrentThreadName(String status) {
  StringBuilder name = new StringBuilder("DataXceiver for client ");
  InetAddress ia = (s == null) ? null : s.getInetAddress();
  name.append(ia == null ? "unknown" : ia.toString());
  if (status != null) {
    name.append(" [").append(status).append("]");
  }
  Thread.currentThread().setName(name.toString());
}
Update the thread name to contain the current status . Use this only after this receiver has started on its thread i . e . outside the constructor .
33,288
/**
 * Reads a block's checksum metadata file and sends it to the client in a
 * single response. Only supported for blocks whose checksum lives in a
 * separate metadata file (not inline with the data).
 */
void readMetadata(DataInputStream in, VersionAndOpcode versionAndOpcode)
    throws IOException {
  ReadMetadataHeader readMetadataHeader =
      new ReadMetadataHeader(versionAndOpcode);
  readMetadataHeader.readFields(in);
  final int namespaceId = readMetadataHeader.getNamespaceId();
  Block block = new Block(readMetadataHeader.getBlockId(), 0,
      readMetadataHeader.getGenStamp());
  ReplicaToRead rtr;
  if ((rtr = datanode.data.getReplicaToRead(namespaceId, block)) == null
      || rtr.isInlineChecksum()) {
    throw new IOException(
        "Read metadata from inline checksum file is not supported");
  }
  DataOutputStream out = null;
  try {
    updateCurrentThreadName("reading metadata for block " + block);
    out = new DataOutputStream(
        NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    byte[] buf = BlockWithChecksumFileReader.getMetaData(datanode.data,
        namespaceId, block);
    // Response layout: status byte, payload length, payload, then a
    // trailing int 0 marker.
    out.writeByte(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeInt(buf.length);
    out.write(buf);
    out.writeInt(0);
  } finally {
    IOUtils.closeStream(out);
  }
}
Reads the metadata and sends the data in one DATA_CHUNK .
33,289
/**
 * Sends a block's CRC32 checksum to the client: a status short followed
 * by the checksum. Uses the replica's cached CRC when available,
 * otherwise computes it from disk using the reader matching the
 * checksum layout (inline vs separate file).
 */
void getBlockCrc(DataInputStream in, VersionAndOpcode versionAndOpcode)
    throws IOException {
  BlockChecksumHeader blockChecksumHeader =
      new BlockChecksumHeader(versionAndOpcode);
  blockChecksumHeader.readFields(in);
  final int namespaceId = blockChecksumHeader.getNamespaceId();
  final Block block = new Block(blockChecksumHeader.getBlockId(), 0,
      blockChecksumHeader.getGenStamp());
  DataOutputStream out = null;
  ReplicaToRead ri = datanode.data.getReplicaToRead(namespaceId, block);
  if (ri == null) {
    throw new IOException("Unknown block");
  }
  updateCurrentThreadName("getting CRC checksum for block " + block);
  try {
    out = new DataOutputStream(
        NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
    int blockCrc;
    if (ri.hasBlockCrcInfo()) {
      // Fast path: the replica already carries its CRC.
      blockCrc = ri.getBlockCrc();
    } else {
      try {
        if (ri.isInlineChecksum()) {
          blockCrc = BlockInlineChecksumReader.getBlockCrc(datanode, ri,
              namespaceId, block);
        } else {
          blockCrc = BlockWithChecksumFileReader.getBlockCrc(datanode, ri,
              namespaceId, block);
        }
      } catch (IOException ioe) {
        LOG.warn("Exception when getting Block CRC", ioe);
        // Tell the client about the failure before propagating it.
        out.writeShort(DataTransferProtocol.OP_STATUS_ERROR);
        out.flush();
        throw ioe;
      }
    }
    out.writeShort(DataTransferProtocol.OP_STATUS_SUCCESS);
    out.writeLong(blockCrc);
    out.flush();
  } finally {
    IOUtils.closeStream(out);
  }
}
Get block data's CRC32 checksum .
33,290
/**
 * Reads a block from disk and streams it to the requesting destination,
 * throttled by the balancing throttler. Silently refuses when the copy
 * thread quota is exceeded.
 *
 * Fix: the success acknowledgement in the finally block dereferenced
 * {@code reply} unconditionally; a non-IOException failure during setup
 * (before the stream was created) left {@code reply} null and the
 * resulting NPE masked the original exception. The write is now guarded
 * by a null check.
 */
private void copyBlock(DataInputStream in, VersionAndOpcode versionAndOpcode)
    throws IOException {
  CopyBlockHeader copyBlockHeader = new CopyBlockHeader(versionAndOpcode);
  copyBlockHeader.readFields(in);
  long startTime = System.currentTimeMillis();
  int namespaceId = copyBlockHeader.getNamespaceId();
  long blockId = copyBlockHeader.getBlockId();
  long genStamp = copyBlockHeader.getGenStamp();
  Block block = new Block(blockId, 0, genStamp);
  // Refuse when the balancer's concurrent-copy quota is exhausted.
  if (!dataXceiverServer.balanceThrottler.acquire()) {
    LOG.info("Not able to copy block " + blockId + " to "
        + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
    return;
  }
  BlockSender blockSender = null;
  DataOutputStream reply = null;
  boolean isOpSuccess = true;
  updateCurrentThreadName("Copying block " + block);
  try {
    blockSender = new BlockSender(namespaceId, block, 0, -1, false, false,
        false, false,
        versionAndOpcode.getDataTransferVersion() >=
            DataTransferProtocol.PACKET_INCLUDE_VERSION_VERSION,
        true, datanode, null);
    OutputStream baseStream =
        NetUtils.getOutputStream(s, datanode.socketWriteTimeout);
    reply = new DataOutputStream(
        new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
    long read = blockSender.sendBlock(reply, baseStream,
        dataXceiverServer.balanceThrottler);
    // Account the transfer in the datanode metrics.
    long readDuration = System.currentTimeMillis() - startTime;
    datanode.myMetrics.bytesReadLatency.inc(readDuration);
    datanode.myMetrics.bytesRead.inc((int) read);
    if (read > KB_RIGHT_SHIFT_MIN) {
      datanode.myMetrics.bytesReadRate.inc(
          (int) (read >> KB_RIGHT_SHIFT_BITS), readDuration);
    }
    datanode.myMetrics.blocksRead.inc();
    LOG.info("Copied block " + block + " to " + s.getRemoteSocketAddress());
  } catch (IOException ioe) {
    isOpSuccess = false;
    throw ioe;
  } finally {
    dataXceiverServer.balanceThrottler.release();
    // Only acknowledge when the copy succeeded AND the stream exists; a
    // failure during setup leaves reply == null.
    if (isOpSuccess && reply != null) {
      try {
        // Signal the end of a successful transfer.
        reply.writeChar('d');
      } catch (IOException ignored) {
        // Best-effort trailer; the copy itself already completed.
      }
    }
    IOUtils.closeStream(reply);
    IOUtils.closeStream(blockSender);
  }
}
Read a block from the disk and then sends it to a destination .
33,291
/**
 * Writes a single status code to the socket's output stream and flushes
 * it.
 */
private void sendResponse(Socket s, short opStatus, long timeout)
    throws IOException {
  DataOutputStream out =
      new DataOutputStream(NetUtils.getOutputStream(s, timeout));
  out.writeShort(opStatus);
  out.flush();
}
Utility function for sending a response .
33,292
/**
 * Reads and returns the next object from the trace, or null once the end
 * of input is reached.
 */
public T getNext() throws IOException {
  try {
    return mapper.readValue(jsonParser, clazz);
  } catch (EOFException endOfTrace) {
    // A clean EOF simply means the trace is exhausted.
    return null;
  }
}
Get the next object from the trace .
33,293
/**
 * Runs a parameterized SQL statement with retry and exponential backoff.
 *
 * Each attempt obtains a connection URL from the factory (falling back to
 * the last known-good URL if the factory fails), executes the statement,
 * sleeps a factory-configured throttle interval, and returns. On any
 * exception the backoff delay is doubled (capped at
 * {@code retryMaxInternalSec} seconds, plus random jitter) before the
 * next attempt.
 *
 * @param connectionFactory source of connections and throttle timing
 * @param sql               statement with {@code ?} placeholders
 * @param sqlParams         values bound to the placeholders, in order
 * @param isWrite           whether a writable endpoint is required
 * @param numRetries        maximum number of attempts
 * @param retryMaxInternalSec cap on the backoff interval, in seconds
 * @param insert            true to executeUpdate, false to executeQuery
 * @param getGeneratedKeys  for inserts, whether to fetch generated keys
 * @return the rows selected by a query; for an insert, the generated
 *         keys if requested, otherwise {@code null}
 * @throws IOException after the final attempt fails, wrapping the cause
 */
public static List < List < Object > > runInsertSelect ( DBConnectionFactory connectionFactory , String sql , List < Object > sqlParams , boolean isWrite , int numRetries , int retryMaxInternalSec , boolean insert , boolean getGeneratedKeys ) throws IOException {
  int waitMS = 3000 ;
  for ( int i = 0 ; i < numRetries ; ++ i ) {
    Connection conn = null ;
    ResultSet generatedKeys = null ;
    PreparedStatement pstmt = null ;
    String url = null ;
    try {
      try {
        url = connectionFactory . getUrl ( isWrite ) ;
      } catch ( IOException ioe ) {
        // Factory lookup failed; reuse the last URL that worked, if any.
        LOG . warn ( "Cannot get DB URL, fall back to the default one" , ioe ) ;
        url = defaultUrls . get ( isWrite ) ;
        if ( url == null ) {
          throw ioe ;
        }
      }
      // FIX: corrected log-message typo ("Attepting").
      LOG . info ( "Attempting connection with URL " + url ) ;
      conn = connectionFactory . getConnection ( url ) ;
      // Connection succeeded: remember this URL as the fallback default.
      defaultUrls . put ( isWrite , url ) ;
      pstmt = getPreparedStatement ( conn , sql , sqlParams , getGeneratedKeys ) ;
      if ( insert ) {
        int recordsUpdated = pstmt . executeUpdate ( ) ;
        LOG . info ( "rows inserted: " + recordsUpdated + " sql: " + sql ) ;
        List < List < Object > > results = null ;
        if ( getGeneratedKeys ) {
          generatedKeys = pstmt . getGeneratedKeys ( ) ;
          results = getResults ( generatedKeys ) ;
        }
        // Throttle: deliberate pause (plus jitter) between DB operations.
        Thread . sleep ( connectionFactory . getDBOpsSleepTime ( ) + rand . nextInt ( 1000 ) ) ;
        return results ;
      } else {
        generatedKeys = pstmt . executeQuery ( ) ;
        List < List < Object > > results = getResults ( generatedKeys ) ;
        pstmt . clearBatch ( ) ;
        LOG . info ( "rows selected: " + results . size ( ) + " sql: " + sql ) ;
        Thread . sleep ( connectionFactory . getDBOpsSleepTime ( ) + rand . nextInt ( 1000 ) ) ;
        return results ;
      }
    } catch ( Exception e ) {
      // FIX: remaining-attempt count was off by one (numRetries - i would
      // claim one retry remains on the final, throwing iteration).
      LOG . info ( "Exception " + e + ". Will retry " + ( numRetries - 1 - i ) + " times." ) ;
      // Exponential backoff, capped at retryMaxInternalSec seconds.
      waitMS += waitMS ;
      if ( waitMS > retryMaxInternalSec * 1000 ) {
        waitMS = retryMaxInternalSec * 1000 ;
      }
      // Randomize the actual wait to avoid thundering-herd retries.
      double waitTime = waitMS + waitMS * rand . nextDouble ( ) ;
      if ( i + 1 == numRetries ) {
        LOG . error ( "Still got Exception after " + numRetries + " retries." , e ) ;
        throw new IOException ( e ) ;
      }
      try {
        Thread . sleep ( ( long ) waitTime ) ;
      } catch ( InterruptedException ie ) {
        throw new IOException ( ie ) ;
      }
    } finally {
      // Close result set, statement, and connection on every attempt.
      DBUtils . close ( generatedKeys , new PreparedStatement [ ] { pstmt } , conn ) ;
    }
  }
  // Unreachable when numRetries > 0 (every path returns or throws).
  return null ;
}
Returns the rows returned by the query. In the case of an insert, returns the generated keys if requested, otherwise null.
33,294
/**
 * Used by the index-update mapper: folds one document operation into the
 * current intermediate form.
 *
 * DELETE queues the document's term for deletion; INSERT adds the
 * document to the index writer; UPDATE does both.
 *
 * @param doc      the document plus the operation to apply
 * @param analyzer analyzer used when adding the document
 * @throws IOException if the writer cannot be created or written to
 */
public void process ( DocumentAndOp doc , Analyzer analyzer ) throws IOException {
  DocumentAndOp . Op op = doc . getOp ( ) ;
  boolean removesOld = ( op == DocumentAndOp . Op . DELETE ) || ( op == DocumentAndOp . Op . UPDATE ) ;
  boolean addsNew = ( op == DocumentAndOp . Op . INSERT ) || ( op == DocumentAndOp . Op . UPDATE ) ;
  if ( removesOld ) {
    deleteList . add ( doc . getTerm ( ) ) ;
  }
  if ( addsNew ) {
    // Writer is created lazily on the first insert.
    if ( writer == null ) {
      writer = createWriter ( ) ;
    }
    writer . addDocument ( doc . getDocument ( ) , analyzer ) ;
    numDocs ++ ;
  }
}
This method is used by the index update mapper and processes a document operation into the current intermediate form.
33,295
/**
 * Lazily inflates the compressed field data, if any.
 *
 * Must be called by every accessor before it touches the deserialized
 * fields. After a successful inflate, the compressed buffer is dropped
 * so the work happens at most once.
 *
 * @throws RuntimeException wrapping any IOException from inflation
 */
protected void ensureInflated ( ) {
  if ( compressed == null ) {
    // Already inflated (or never compressed); nothing to do.
    return ;
  }
  try {
    ByteArrayInputStream packed = new ByteArrayInputStream ( compressed ) ;
    DataInput source = new DataInputStream ( new InflaterInputStream ( packed ) ) ;
    readFieldsCompressed ( source ) ;
    compressed = null ;
  } catch ( IOException e ) {
    // Inflation failures are programming/data errors; surface unchecked.
    throw new RuntimeException ( e ) ;
  }
}
Must be called by all methods that access fields, to ensure the data has been uncompressed.
33,296
/**
 * Retrieves the UGI for a user, consulting the cache first; on a miss or
 * an expired entry, resolves it by running a shell command.
 *
 * When the cache grows past CLEANUP_THRESHOLD, expired entries are
 * evicted before the fresh result is stored.
 *
 * @param userName user to look up
 * @return the user's group information, or {@code null} if the shell
 *         lookup fails
 */
public static synchronized UnixUserGroupInformation getUgiForUser ( String userName ) {
  long now = System . currentTimeMillis ( ) ;
  long cutoffTime = now - ugiLifetime ;
  // Fast path: a cached entry that is still within its lifetime.
  CachedUgi hit = ugiCache . get ( userName ) ;
  if ( hit != null && hit . getInitTime ( ) > cutoffTime ) {
    return hit . getUgi ( ) ;
  }
  UnixUserGroupInformation fresh ;
  try {
    fresh = getUgi ( userName ) ;
  } catch ( IOException e ) {
    // Lookup failure is reported as null, not an exception.
    return null ;
  }
  // Opportunistic eviction of expired entries once the cache is large.
  if ( ugiCache . size ( ) > CLEANUP_THRESHOLD ) {
    Iterator < Map . Entry < String , CachedUgi > > scan = ugiCache . entrySet ( ) . iterator ( ) ;
    while ( scan . hasNext ( ) ) {
      if ( scan . next ( ) . getValue ( ) . getInitTime ( ) < cutoffTime ) {
        scan . remove ( ) ;
      }
    }
  }
  ugiCache . put ( fresh . getUserName ( ) , new CachedUgi ( fresh , now ) ) ;
  return fresh ;
}
Retrieves a UGI for a user. Tries the cache first; if not found, gets it by running a shell command.
33,297
/**
 * Stores a UGI in the cache, stamped with the current time.
 * Exists only for JUnit testing purposes.
 *
 * @param ugi the user-group information to cache
 */
static synchronized void saveToCache ( UnixUserGroupInformation ugi ) {
  long now = System . currentTimeMillis ( ) ;
  CachedUgi entry = new CachedUgi ( ugi , now ) ;
  ugiCache . put ( ugi . getUserName ( ) , entry ) ;
}
Saves a UGI to the cache; only for JUnit testing purposes.
33,298
/**
 * Resolves a user's UGI by running the shell command {@code id -Gn}.
 *
 * The username is validated against USERNAME_PATTERN before being
 * interpolated into the shell command line.
 *
 * @param userName user to resolve; must match USERNAME_PATTERN
 * @return the user's name together with their groups
 * @throws IOException if the name is invalid or the command fails
 */
private static UnixUserGroupInformation getUgi ( String userName ) throws IOException {
  boolean validName = userName != null && USERNAME_PATTERN . matcher ( userName ) . matches ( ) ;
  if ( ! validName ) {
    throw new IOException ( "Invalid username=" + userName ) ;
  }
  // Safe to interpolate: the name was vetted against the whitelist above.
  String idCommand = "id -Gn '" + userName + "'" ;
  String [ ] cmd = new String [ ] { "bash" , "-c" , idCommand } ;
  String [ ] groups = Shell . execCommand ( cmd ) . split ( "\\s+" ) ;
  return new UnixUserGroupInformation ( userName , groups ) ;
}
Gets the UGI for a user by running the shell command "id -Gn".
33,299
/**
 * Starts node health monitoring: schedules the health-check task to run
 * immediately and then every {@code intervalTime} ms on a daemon timer.
 * Does nothing if the configuration disables the monitor.
 */
void start ( ) {
  if ( ! shouldRun ( conf ) ) {
    LOG . info ( "Not starting node health monitor" ) ;
    return ;
  }
  // Daemon thread so the timer never blocks JVM shutdown.
  Timer scheduler = new Timer ( "NodeHealthMonitor-Timer" , true ) ;
  nodeHealthScriptScheduler = scheduler ;
  scheduler . scheduleAtFixedRate ( timer , 0 , intervalTime ) ;
}
Starts the node health monitoring.