idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,300
/**
 * Terminates the node health monitoring service: cancels the scheduled
 * health-check task and destroys any running health-script process.
 * No-op when health monitoring is not enabled for this configuration.
 */
void stop() {
  if (!shouldRun(conf)) {
    return;
  }
  nodeHealthScriptScheduler.cancel();
  if (shexec == null) {
    return;
  }
  Process process = shexec.getProcess();
  if (process != null) {
    process.destroy();
  }
}
Method used to terminate the node health monitoring service .
33,301
/**
 * Registers an alternate name for a writable class so it can be known by
 * something other than its class name. Keeps both directions of the
 * class &lt;-&gt; name mapping in sync.
 */
public static synchronized void setName(Class writableClass, String name) {
  CLASS_TO_NAME.put(writableClass, name);
  NAME_TO_CLASS.put(name, writableClass);
}
Set the name that a class should be known as to something other than the class name .
33,302
/**
 * Returns whether a parity file exists for the given source file under the
 * given codec.
 */
public static boolean parityExists(FileStatus src, Codec codec, Configuration conf)
    throws IOException {
  ParityFilePair pair = ParityFilePair.getParityFile(codec, src, conf);
  return pair != null;
}
Return whether the parity file of the source file exists.
33,303
/**
 * Creates a Thrift client connected to the CoronaProxyJobTracker.
 *
 * @param conf the Corona configuration holding the proxy job tracker thrift address
 * @return a client whose framed transport has been opened
 * @throws IOException if the transport cannot be opened
 */
public static CoronaProxyJobTrackerService.Client getPJTClient(CoronaConf conf)
    throws IOException {
  InetSocketAddress address =
      NetUtils.createSocketAddr(conf.getProxyJobTrackerThriftAddress());
  TFramedTransport transport =
      new TFramedTransport(new TSocket(address.getHostName(), address.getPort()));
  CoronaProxyJobTrackerService.Client client =
      new CoronaProxyJobTrackerService.Client(new TBinaryProtocol(transport));
  try {
    transport.open();
  } catch (TException e) {
    // Previously this exception was only logged, handing callers a client
    // with an unopened transport that failed later with a confusing error.
    // Fail fast instead, preserving the cause.
    throw new IOException(
        "Could not open transport to the proxy job tracker at " + address, e);
  }
  return client;
}
Used for getting a client to the CoronaProxyJobTracker
33,304
public static void waitWhileClusterManagerInSafeMode ( CoronaConf conf ) throws IOException { CoronaProxyJobTrackerService . Client pjtClient = getPJTClient ( conf ) ; while ( true ) { try { if ( ! pjtClient . getClusterManagerSafeModeFlag ( ) ) { break ; } LOG . info ( "Safe mode flag is set on the ProxyJobTracker" ) ; try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { throw new IOException ( e ) ; } } catch ( TException e ) { throw new IOException ( "Could not check the safe mode flag on the ProxyJobTracker" , e ) ; } } }
This helper method polls the ProxyJobTracker while the clusterManagerSafeMode flag is set there, returning only once the flag is cleared.
33,305
void noZeroOrOneOrAddress ( String command ) { if ( isZeroCommand || isOneCommand ) { throwException ( CMD + command + " (zero|one) should not be specified" ) ; } if ( isAddressCommand ) { throwException ( CMD + command + " address should not be specified" ) ; } }
For commands that directly obtain information from zk
33,306
void eitherZeroOrOneOrAddress ( String command ) { if ( ! isAddressCommand && ! ( isZeroCommand ^ isOneCommand ) ) { throwException ( CMD + command + ": (zero|one) specified incorrectly" ) ; } if ( isAddressCommand && ( isZeroCommand || isOneCommand ) ) { throwException ( CMD + command + ": cannot specify address with (zero|one)" ) ; } }
Check that exactly one of -zero, -one, or -address is specified, and that -address is not combined with -zero or -one.
33,307
/**
 * Computes the array length to actually allocate for a recommended size:
 * clamped to [MIN_ARRAY_LENGTH, MAX_ARRAY_LENGTH] and, in between, rounded
 * up to the next power of two.
 */
private static int actualArrayLength(int recommended) {
  if (recommended > MAX_ARRAY_LENGTH) {
    return MAX_ARRAY_LENGTH;
  }
  if (recommended < MIN_ARRAY_LENGTH) {
    return MIN_ARRAY_LENGTH;
  }
  final int floorPow2 = Integer.highestOneBit(recommended);
  // Already a power of two? Keep it; otherwise round up to the next one.
  return (floorPow2 == recommended) ? floorPow2 : floorPow2 << 1;
}
Compute the actual array length: clamped to [MIN_ARRAY_LENGTH, MAX_ARRAY_LENGTH] and rounded up to the next power of two.
33,308
/**
 * Get an iterator over one shard (partition) of the blocks map.
 *
 * @param shardId   index of the shard to iterate; must be less than numShards
 * @param numShards total number of shards
 * @return an iterator over the shard, or null when the shard id is beyond
 *         the backing table (nothing to iterate)
 * @throws IllegalArgumentException if shardId is not less than numShards
 */
public Iterator<E> shardIterator(int shardId, int numShards) {
  // Validate arguments first so an out-of-range shard id is always
  // reported, even when it also exceeds the backing table length
  // (previously such a call silently returned null).
  if (shardId >= numShards) {
    throw new IllegalArgumentException(
        "Shard id must be less than total shards, shardId: " + shardId
            + ", numShards: " + numShards);
  }
  if (shardId >= entries.length) {
    return null;
  }
  return new SetIterator(shardId, numShards);
}
Get iterator over a part of the blocks map .
33,309
/**
 * Returns the Configuration of the daemon hosting this servlet, as stored
 * in the servlet context by HttpServer at startup.
 */
private Configuration getConfFromContext() {
  Object attribute =
      getServletContext().getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE);
  Configuration conf = (Configuration) attribute;
  assert conf != null;
  return conf;
}
Return the Configuration of the daemon hosting this servlet . This is populated when the HttpServer starts .
33,310
/**
 * Guts of the servlet - extracted for easy testing. Dumps the given
 * configuration to the writer in the requested format.
 *
 * @throws BadFormatException if the format is neither JSON nor XML
 */
static void writeResponse(Configuration conf, Writer out, String format)
    throws IOException, BadFormatException {
  if (FORMAT_JSON.equals(format)) {
    Configuration.dumpConfiguration(conf, out);
    return;
  }
  if (FORMAT_XML.equals(format)) {
    conf.writeXml(out);
    return;
  }
  throw new BadFormatException("Bad format: " + format);
}
Guts of the servlet - extracted for easy testing .
33,311
public FSDataOutputStream create ( Path file , FsPermission permission , boolean overwrite , int bufferSize , short replication , long blockSize , Progressable progress ) throws IOException { final FTPClient client = connect ( ) ; Path workDir = new Path ( client . printWorkingDirectory ( ) ) ; Path absolute = makeAbsolute ( workDir , file ) ; if ( exists ( client , file ) ) { if ( overwrite ) { delete ( client , file ) ; } else { disconnect ( client ) ; throw new IOException ( "File already exists: " + file ) ; } } Path parent = absolute . getParent ( ) ; if ( parent == null || ! mkdirs ( client , parent , FsPermission . getDefault ( ) ) ) { parent = ( parent == null ) ? new Path ( "/" ) : parent ; disconnect ( client ) ; throw new IOException ( "create(): Mkdirs failed to create: " + parent ) ; } client . allocate ( bufferSize ) ; client . changeWorkingDirectory ( parent . toUri ( ) . getPath ( ) ) ; FSDataOutputStream fos = new FSDataOutputStream ( client . storeFileStream ( file . getName ( ) ) , statistics ) { public void close ( ) throws IOException { super . close ( ) ; if ( ! client . isConnected ( ) ) { throw new FTPException ( "Client not connected" ) ; } boolean cmdCompleted = client . completePendingCommand ( ) ; disconnect ( client ) ; if ( ! cmdCompleted ) { throw new FTPException ( "Could not complete transfer, Reply Code - " + client . getReplyCode ( ) ) ; } } } ; if ( ! FTPReply . isPositivePreliminary ( client . getReplyCode ( ) ) ) { fos . close ( ) ; throw new IOException ( "Unable to create file: " + file + ", Aborting" ) ; } return fos ; }
A stream obtained via this call must be closed before using other APIs of this class or else the invocation will block .
33,312
/**
 * Adds an InputSplit to this collection and accumulates its length into
 * the running total size.
 *
 * @throws IOException if the collection is uninitialized or already full
 */
public void add(InputSplit s) throws IOException {
  if (splits == null) {
    throw new IOException("Uninitialized InputSplit");
  }
  if (fill == splits.length) {
    throw new IOException("Too many splits");
  }
  splits[fill] = s;
  fill++;
  totsize += s.getLength();
}
Add an InputSplit to this collection .
33,313
/**
 * Collects the union of location hints from all child InputSplits.
 *
 * @return distinct host names over all child splits (possibly empty)
 */
public String[] getLocations() throws IOException {
  HashSet<String> hosts = new HashSet<String>();
  for (InputSplit split : splits) {
    String[] hints = split.getLocations();
    if (hints == null || hints.length == 0) {
      continue;
    }
    for (String host : hints) {
      hosts.add(host);
    }
  }
  return hosts.toArray(new String[hosts.size()]);
}
Collect a set of hosts from all child InputSplits .
33,314
/**
 * Waits until either the given timeout expires or a connection to
 * ZooKeeper is established.
 *
 * @param timeoutMillis maximum time to wait, in milliseconds
 * @return true if the connection was made before the timeout
 */
public boolean await(long timeoutMillis) throws IOException {
  try {
    return connectLatch.await(timeoutMillis, TimeUnit.MILLISECONDS);
  } catch (InterruptedException e) {
    interruptedException(
        "Interrupted waiting for connection (timeout = " + timeoutMillis + "ms.)", e);
    return false;
  }
}
Wait until either a specified timeout expires or a connection is created to ZooKeeper .
33,315
private void readIdToRequest ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "idToRequest" ) ; coronaSerializer . readStartObjectToken ( "idToRequest" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { Integer id = Integer . parseInt ( coronaSerializer . getFieldName ( ) ) ; idToRequest . put ( id , new ResourceRequestInfo ( coronaSerializer ) ) ; current = coronaSerializer . nextToken ( ) ; } }
Reads the idToRequest map from a JSON stream
33,316
private void readIdToPendingRequests ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "idToPendingRequests" ) ; coronaSerializer . readStartArrayToken ( "idToPendingRequests" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_ARRAY ) { pendingRequestsList . add ( coronaSerializer . jsonParser . getIntValue ( ) ) ; current = coronaSerializer . nextToken ( ) ; } }
Reads the idToPendingRequests list of pending request ids from a JSON stream
33,317
private void readIdToGrant ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "idToGrant" ) ; coronaSerializer . readStartObjectToken ( "idToGrant" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { Integer id = Integer . parseInt ( coronaSerializer . getFieldName ( ) ) ; ResourceGrant resourceGrant = coronaSerializer . readValueAs ( ResourceGrant . class ) ; idToGrant . put ( id , new ResourceGrant ( resourceGrant ) ) ; current = coronaSerializer . nextToken ( ) ; } }
Reads the idToGrant map from a JSON stream
33,318
private void readTypeToFirstWait ( CoronaSerializer coronaSerializer ) throws IOException { coronaSerializer . readField ( "typeToFirstWait" ) ; coronaSerializer . readStartObjectToken ( "typeToFirstWait" ) ; JsonToken current = coronaSerializer . nextToken ( ) ; while ( current != JsonToken . END_OBJECT ) { String resourceTypeStr = coronaSerializer . getFieldName ( ) ; Long wait = coronaSerializer . readValueAs ( Long . class ) ; current = coronaSerializer . nextToken ( ) ; if ( wait == - 1 ) { wait = null ; } typeToFirstWait . put ( ResourceType . valueOf ( resourceTypeStr ) , wait ) ; } }
Reads the typeToFirstWait map from the JSON stream
33,319
/**
 * Serializes this Session's state to the given JSON generator. Used when
 * the ClusterManager persists its state to disk.
 *
 * A null first-wait value means "requested but never waited"; it is
 * persisted as -1 and the reader converts -1 back to null.
 *
 * @param jsonGenerator generator to write the session object to
 * @throws IOException on generator errors
 */
public void write(JsonGenerator jsonGenerator) throws IOException {
  jsonGenerator.writeStartObject();

  jsonGenerator.writeFieldName("idToRequest");
  jsonGenerator.writeStartObject();
  // Iterate entries directly instead of keySet()+get() to avoid a lookup per key.
  for (Map.Entry<Integer, ResourceRequestInfo> entry : idToRequest.entrySet()) {
    jsonGenerator.writeFieldName(entry.getKey().toString());
    entry.getValue().write(jsonGenerator);
  }
  jsonGenerator.writeEndObject();

  jsonGenerator.writeFieldName("idToPendingRequests");
  jsonGenerator.writeStartArray();
  for (Integer id : idToPendingRequests.keySet()) {
    jsonGenerator.writeNumber(id);
  }
  jsonGenerator.writeEndArray();

  jsonGenerator.writeFieldName("idToGrant");
  jsonGenerator.writeStartObject();
  for (Map.Entry<Integer, ResourceGrant> entry : idToGrant.entrySet()) {
    jsonGenerator.writeObjectField(entry.getKey().toString(), entry.getValue());
  }
  jsonGenerator.writeEndObject();

  jsonGenerator.writeObjectField("status", status);
  jsonGenerator.writeStringField("sessionId", sessionId);
  jsonGenerator.writeBooleanField("deleted", deleted);
  jsonGenerator.writeNumberField("deletedTime", deletedTime);
  jsonGenerator.writeObjectField("info", info);
  jsonGenerator.writeNumberField("startTime", startTime);

  jsonGenerator.writeFieldName("poolInfo");
  poolInfo.write(jsonGenerator);

  jsonGenerator.writeFieldName("typeToFirstWait");
  jsonGenerator.writeStartObject();
  for (Map.Entry<ResourceType, Long> entry : typeToFirstWait.entrySet()) {
    Long wait = entry.getValue();
    if (wait == null) {
      // Long.valueOf avoids the deprecated new Long(...) constructor.
      wait = Long.valueOf(-1);
    }
    jsonGenerator.writeNumberField(entry.getKey().toString(), wait);
  }
  jsonGenerator.writeEndObject();

  jsonGenerator.writeObjectField("expectedInfo", expectedInfo);
  jsonGenerator.writeObjectField("lastHeartbeat", lastHeartbeat);
  jsonGenerator.writeNumberField("lastSyncTime", lastSyncTime);
  jsonGenerator.writeEndObject();
}
Used to write the state of the Session instance to disk when we are persisting the state of the ClusterManager
33,320
public void restoreAfterSafeModeRestart ( ) { for ( Integer pendingRequestId : pendingRequestsList ) { ResourceRequestInfo request = idToRequest . get ( pendingRequestId ) ; incrementRequestCount ( request . getType ( ) , 1 ) ; addPendingRequest ( request ) ; } for ( Integer grantedRequestId : idToGrant . keySet ( ) ) { ResourceRequestInfo request = idToRequest . get ( grantedRequestId ) ; incrementRequestCount ( request . getType ( ) , 1 ) ; addGrantedRequest ( request ) ; } }
This method rebuilds members related to the Session instance which were not directly persisted themselves .
33,321
/**
 * Updates only the session info url and name, leaving all other session
 * info untouched.
 */
public void updateInfoUrlAndName(String url, String name) {
  info.url = url;
  info.name = name;
}
Only update the session info url and name .
33,322
public ResourceRequestInfo getPendingRequestOnHost ( String host , ResourceType type ) { Context c = getContext ( type ) ; List < ResourceRequestInfo > hostReqs = c . hostToPendingRequests . get ( host ) ; if ( hostReqs != null ) { return hostReqs . get ( 0 ) ; } return null ; }
Find a pending request that wants to run on a given host .
33,323
public ResourceRequestInfo getPendingRequestOnRack ( String host , Node rack , ResourceType type ) { Context c = getContext ( type ) ; List < ResourceRequestInfo > rackReqs = c . rackToPendingRequests . get ( rack ) ; if ( rackReqs != null ) { for ( ResourceRequestInfo req : rackReqs ) { Set < String > excluded = req . getExcludeHosts ( ) ; if ( ! excluded . contains ( host ) ) { return req ; } } } return null ; }
Find a pending request that wants to run on a given rack .
33,324
public ResourceRequestInfo getPendingRequestForAny ( String host , ResourceType type ) { Context c = getContext ( type ) ; for ( ResourceRequestInfo req : c . anyHostRequests ) { Set < String > excluded = req . getExcludeHosts ( ) ; if ( ! excluded . contains ( host ) ) { return req ; } } return null ; }
Find a pending request that can run on any machine .
33,325
/**
 * Returns the per-resource-type context, lazily creating it on first use.
 */
protected Context getContext(ResourceType type) {
  Context context = typeToContext.get(type);
  if (context != null) {
    return context;
  }
  context = new Context();
  typeToContext.put(type, context);
  return context;
}
Get the context for a resource type. Creates the context on demand if it doesn't exist.
33,326
/**
 * Adjusts the request count for a resource type by delta, raising the
 * recorded maximum concurrent request count whenever it is exceeded.
 */
protected void incrementRequestCount(ResourceType type, int delta) {
  Context context = getContext(type);
  context.requestCount += delta;
  if (context.requestCount > context.maxConcurrentRequestCount) {
    context.maxConcurrentRequestCount = context.requestCount;
  }
}
Increase a request count for a resource for this session, updating the recorded maximum concurrent request count whenever it is exceeded.
33,327
protected void addPendingRequestForType ( ResourceRequestInfo req ) { Context c = getContext ( req . getType ( ) ) ; c . pendingRequests . add ( req ) ; c . pendingRequestCount ++ ; }
Add a pending request for a resource type .
33,328
protected void removePendingRequestForType ( ResourceRequestInfo req ) { Context c = getContext ( req . getType ( ) ) ; Object removed = Utilities . removeReference ( c . pendingRequests , req ) ; if ( removed != null ) { c . pendingRequestCount -- ; } }
Remove a pending resource request for a resource type
33,329
protected void addGrantedRequest ( ResourceRequestInfo req ) { Context c = getContext ( req . getType ( ) ) ; c . grantedRequests . add ( req ) ; c . fulfilledRequestCount ++ ; }
Add a granted request for a resource type .
33,330
protected void removeGrantedRequest ( ResourceRequestInfo req , boolean isRevoked ) { Context c = getContext ( req . getType ( ) ) ; Utilities . removeReference ( c . grantedRequests , req ) ; if ( isRevoked ) { c . fulfilledRequestCount -- ; c . revokedRequestCount ++ ; } }
Remove a granted request for a resource type .
33,331
public void requestResource ( List < ResourceRequestInfo > requestList ) { if ( deleted ) { throw new RuntimeException ( "Session: " + sessionId + " has been deleted" ) ; } for ( ResourceRequestInfo req : requestList ) { boolean newRequest = idToRequest . put ( req . getId ( ) , req ) == null ; if ( ! newRequest ) { LOG . warn ( "Duplicate request from Session: " + sessionId + "" + " request: " + req . getId ( ) ) ; continue ; } incrementRequestCount ( req . getType ( ) , 1 ) ; addPendingRequest ( req ) ; setTypeRequested ( req . getType ( ) ) ; } }
Request a list of resources
33,332
/**
 * Marks a resource type as requested for the first-wait metrics. The
 * value is stored as null to distinguish "requested but not yet waited"
 * from a type that was never requested at all.
 */
private void setTypeRequested(ResourceType type) {
  synchronized (typeToFirstWait) {
    if (typeToFirstWait.containsKey(type)) {
      return;
    }
    typeToFirstWait.put(type, null);
  }
}
Set the resource type to requested for first wait metrics . The value will be set to null to denote the difference between waiting and requested and not waiting .
33,333
private void addPendingRequest ( ResourceRequestInfo req ) { idToPendingRequests . put ( req . getId ( ) , req ) ; if ( req . getHosts ( ) != null && req . getHosts ( ) . size ( ) > 0 ) { Context c = getContext ( req . getType ( ) ) ; for ( RequestedNode node : req . getRequestedNodes ( ) ) { String host = node . getHost ( ) ; List < ResourceRequestInfo > hostReqs = c . hostToPendingRequests . get ( host ) ; if ( hostReqs == null ) { hostReqs = new LinkedList < ResourceRequestInfo > ( ) ; c . hostToPendingRequests . put ( host , hostReqs ) ; } hostReqs . add ( req ) ; Node rack = node . getRack ( ) ; List < ResourceRequestInfo > rackReqs = c . rackToPendingRequests . get ( rack ) ; if ( rackReqs == null ) { rackReqs = new LinkedList < ResourceRequestInfo > ( ) ; c . rackToPendingRequests . put ( rack , rackReqs ) ; } rackReqs . add ( req ) ; } } Context c = getContext ( req . getType ( ) ) ; c . anyHostRequests . add ( req ) ; addPendingRequestForType ( req ) ; }
Add a request to the list of pending
33,334
private void removePendingRequest ( ResourceRequestInfo req ) { ResourceRequestInfo removed = idToPendingRequests . remove ( req . getId ( ) ) ; if ( removed != null ) { if ( req . getHosts ( ) != null && req . getHosts ( ) . size ( ) > 0 ) { Context c = getContext ( req . getType ( ) ) ; for ( RequestedNode node : req . getRequestedNodes ( ) ) { String host = node . getHost ( ) ; List < ResourceRequestInfo > hostReqs = c . hostToPendingRequests . get ( host ) ; Utilities . removeReference ( hostReqs , req ) ; if ( hostReqs . isEmpty ( ) ) { c . hostToPendingRequests . remove ( host ) ; } Node rack = node . getRack ( ) ; List < ResourceRequestInfo > rackReqs = c . rackToPendingRequests . get ( rack ) ; Utilities . removeReference ( rackReqs , req ) ; if ( rackReqs . isEmpty ( ) ) { c . rackToPendingRequests . remove ( rack ) ; } } } Context c = getContext ( req . getType ( ) ) ; Utilities . removeReference ( c . anyHostRequests , req ) ; } removePendingRequestForType ( req ) ; }
Removes the request from the list of pending
33,335
/**
 * Releases resources that are no longer needed by the session. For each id:
 * the request is removed; if it had a grant the grant is cancelled and
 * returned, otherwise the still-pending request is dropped. Ids with no
 * matching request are silently ignored.
 *
 * @param idList ids of the requests whose resources should be released
 * @return the grants that were cancelled as a result
 * @throws RuntimeException if the session has been deleted
 */
public List < ResourceGrant > releaseResource ( List < Integer > idList ) { if ( deleted ) { throw new RuntimeException ( "Session: " + sessionId + " has been deleted" ) ; } List < ResourceGrant > canceledGrants = new ArrayList < ResourceGrant > ( ) ; for ( Integer id : idList ) { ResourceRequestInfo req = idToRequest . get ( id ) ; if ( req != null ) { idToRequest . remove ( id ) ; ResourceGrant grant = idToGrant . remove ( id ) ; if ( grant != null ) { canceledGrants . add ( grant ) ; removeGrantedRequest ( req , false ) ; } else { removePendingRequest ( req ) ; } incrementRequestCount ( req . getType ( ) , - 1 ) ; } } return canceledGrants ; }
Release the resources that are no longer needed from the session
33,336
/**
 * Grants a resource to satisfy a request: the request moves from pending
 * to granted, and the first-wait time for its resource type is recorded if
 * it has not been set yet.
 *
 * @param req   the request being satisfied (must have been requested earlier)
 * @param grant the grant assigned to the request
 * @throws RuntimeException      if the session has been deleted
 * @throws IllegalStateException if the resource type was never requested
 */
public void grantResource(ResourceRequestInfo req, ResourceGrant grant) {
  if (deleted) {
    throw new RuntimeException("Session: " + sessionId + " has been deleted");
  }
  removePendingRequest(req);
  idToGrant.put(req.getId(), grant);
  addGrantedRequest(req);
  synchronized (typeToFirstWait) {
    if (!typeToFirstWait.containsKey(req.getType())) {
      throw new IllegalStateException(
          "Impossible to get a grant prior to requesting a resource.");
    }
    Long firstWait = typeToFirstWait.get(req.getType());
    if (firstWait == null) {
      // First grant of this type: record how long the session waited.
      // Long.valueOf avoids the deprecated new Long(...) constructor.
      firstWait = Long.valueOf(ClusterManager.clock.getTime() - startTime);
      typeToFirstWait.put(req.getType(), firstWait);
    }
  }
}
Grant a resource to a session to satisfy a request . Update the finalized first resource type times if not already set
33,337
public List < ResourceGrant > revokeResource ( List < Integer > idList ) { if ( deleted ) { throw new RuntimeException ( "Session: " + sessionId + " has been deleted" ) ; } List < ResourceGrant > canceledGrants = new ArrayList < ResourceGrant > ( ) ; for ( Integer id : idList ) { ResourceRequestInfo req = idToRequest . get ( id ) ; ResourceGrant grant = idToGrant . remove ( id ) ; if ( grant != null ) { if ( req == null ) { throw new RuntimeException ( "Session: " + sessionId + ", requestId: " + id + " grant exists but request doesn't" ) ; } removeGrantedRequest ( req , true ) ; canceledGrants . add ( grant ) ; } } return canceledGrants ; }
Revoke a list of resources from a session
33,338
public List < GrantReport > getGrantReportList ( ) { Map < Integer , GrantReport > grantReportMap = new TreeMap < Integer , GrantReport > ( ) ; for ( Map . Entry < Integer , ResourceGrant > entry : idToGrant . entrySet ( ) ) { grantReportMap . put ( entry . getKey ( ) , new GrantReport ( entry . getKey ( ) . intValue ( ) , entry . getValue ( ) . getAddress ( ) . toString ( ) , entry . getValue ( ) . getType ( ) , entry . getValue ( ) . getGrantedTime ( ) ) ) ; } return new ArrayList < GrantReport > ( grantReportMap . values ( ) ) ; }
Get a snapshot of the grants used for this session for the web server . This method should be synchronized by the caller around the session .
33,339
public boolean checkHeartbeatInfo ( HeartbeatArgs jtInfo ) { if ( expectedInfo . requestId != jtInfo . requestId ) { LOG . fatal ( "heartbeat out-of-sync:" + sessionId + " CM:" + expectedInfo . requestId + " " + expectedInfo . grantId + " JT:" + jtInfo . requestId + " " + jtInfo . grantId ) ; return false ; } if ( expectedInfo . grantId == jtInfo . grantId ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "heartbeat match:" + sessionId ) ; } lastSyncTime = System . currentTimeMillis ( ) ; lastHeartbeat . requestId = 0 ; lastHeartbeat . grantId = 0 ; return true ; } if ( jtInfo . grantId != lastHeartbeat . grantId ) { LOG . info ( "heartbeat mismatch with progress:" + sessionId + " CM:" + expectedInfo . requestId + " " + expectedInfo . grantId + " JT:" + jtInfo . requestId + " " + jtInfo . grantId ) ; lastSyncTime = System . currentTimeMillis ( ) ; lastHeartbeat . requestId = jtInfo . requestId ; lastHeartbeat . grantId = jtInfo . grantId ; return true ; } if ( System . currentTimeMillis ( ) - lastSyncTime > maxDelay ) { LOG . error ( "heartbeat out-of-sync:" + sessionId + " CM:" + expectedInfo . requestId + " " + expectedInfo . grantId + " JT:" + jtInfo . requestId + " " + jtInfo . grantId ) ; return true ; } LOG . info ( "heartbeat mismatch with no progress:" + sessionId + " CM:" + expectedInfo . requestId + " " + expectedInfo . grantId + " JT:" + jtInfo . requestId + " " + jtInfo . grantId ) ; return true ; }
Check if the heartbeatInfo between JT and CM are in sync . If this method returns false it means JT and CM are out of sync very badly and the session will be killed .
33,340
public List < Integer > getGrantsToPreempt ( int maxGrantsToPreempt , long maxRunningTime , ResourceType type ) { if ( deleted ) { LOG . warn ( "Attempt to preempt from deleted session " + getSessionId ( ) ) ; return Collections . emptyList ( ) ; } List < ResourceGrant > candidates = getGrantsYoungerThan ( maxRunningTime , type ) ; List < Integer > grantIds = new ArrayList < Integer > ( ) ; if ( candidates . size ( ) <= maxGrantsToPreempt ) { for ( ResourceGrant grant : candidates ) { grantIds . add ( grant . id ) ; } } else { sortGrantsByStartTime ( candidates ) ; for ( ResourceGrant grant : candidates ) { grantIds . add ( grant . id ) ; if ( grantIds . size ( ) == maxGrantsToPreempt ) { break ; } } } LOG . info ( "Found " + grantIds . size ( ) + " " + type + " grants younger than " + maxRunningTime + " ms to preempt in " + getSessionId ( ) ) ; return grantIds ; }
Obtain a list of grants to be preempted
33,341
private List < ResourceGrant > getGrantsYoungerThan ( long maxRunningTime , ResourceType type ) { long now = ClusterManager . clock . getTime ( ) ; List < ResourceGrant > candidates = new ArrayList < ResourceGrant > ( ) ; for ( ResourceGrant grant : getGrants ( ) ) { if ( now - grant . getGrantedTime ( ) < maxRunningTime && type . equals ( grant . getType ( ) ) ) { candidates . add ( grant ) ; } } return candidates ; }
Get all the grants that have been running for less than maxRunningTime
33,342
/**
 * Sorts grants in place by granted time in descending order (most recent
 * first); ties are broken by grant id, also descending.
 *
 * @param grants the list to sort in place
 */
private void sortGrantsByStartTime(List<ResourceGrant> grants) {
  Collections.sort(grants, new Comparator<ResourceGrant>() {
    public int compare(ResourceGrant g1, ResourceGrant g2) {
      // Use compare() rather than subtraction, which can overflow.
      int byTime = Long.compare(g2.grantedTime, g1.grantedTime);
      if (byTime != 0) {
        return byTime;
      }
      return Integer.compare(g2.id, g1.id);
    }
  });
}
Sort grants on granted time in descending order
33,343
static void checkChannelValidity ( Object channel ) throws IOException { if ( channel == null ) { throw new IOException ( "Channel is null. Check " + "how the channel or socket is created." ) ; } if ( ! ( channel instanceof SelectableChannel ) ) { throw new IOException ( "Channel should be a SelectableChannel" ) ; } }
Utility function to check if channel is ok . Mainly to throw IOException instead of runtime exception in case of mismatch . This mismatch can occur for many runtime reasons .
33,344
/**
 * Performs one IO operation (read or write, per ops) on the buffer and
 * returns the number of bytes transferred, waiting up to the configured
 * timeout for the channel to become ready between attempts.
 *
 * @return number of bytes transferred, or -1 if the stream is closed
 * @throws IllegalArgumentException if the buffer has nothing remaining
 * @throws SocketTimeoutException   if the channel does not become ready in time
 * @throws IOException              on channel errors; the stream is marked
 *                                  closed first when the channel is no longer open
 */
int doIO ( ByteBuffer buf , int ops ) throws IOException { if ( ! buf . hasRemaining ( ) ) { throw new IllegalArgumentException ( "Buffer has no data left." ) ; } while ( buf . hasRemaining ( ) ) { if ( closed ) { return - 1 ; } try { int n = performIO ( buf ) ; if ( n != 0 ) { return n ; } } catch ( IOException e ) { if ( ! channel . isOpen ( ) ) { closed = true ; } throw e ; } int count = 0 ; try { count = selector . select ( channel , ops , timeout ) ; } catch ( IOException e ) { closed = true ; throw e ; } if ( count == 0 ) { throw new SocketTimeoutException ( timeoutExceptionString ( channel , timeout , ops ) ) ; } } return 0 ; }
Performs one IO and returns number of bytes read or written . It waits up to the specified timeout . If the channel is not read before the timeout SocketTimeoutException is thrown .
33,345
/**
 * Finds a finalized edits file spanning the given transaction id range.
 *
 * @throws IOException if no such file exists
 */
File findFinalizedEditsFile(long startTxId, long endTxId) throws IOException {
  String fileName = NNStorage.getFinalizedEditsFileName(startTxId, endTxId);
  File editsFile = new File(sd.getCurrentDir(), fileName);
  if (!editsFile.exists()) {
    throw new IOException("No edits file for range " + startTxId + "-" + endTxId);
  }
  return editsFile;
}
Find an edits file spanning the given transaction ID range . If no such file exists an exception is thrown .
33,346
/**
 * Gets the temporary file used for log syncing after a journal node crash.
 * The name is the finalized edits file name with a ".tmp=&lt;stamp&gt;" suffix.
 */
File getSyncLogTemporaryFile(long segmentTxId, long endTxId, long stamp) {
  String fileName =
      NNStorage.getFinalizedEditsFileName(segmentTxId, endTxId) + ".tmp=" + stamp;
  return new File(sd.getCurrentDir(), fileName);
}
Get name for temporary file used for log syncing after a journal node crashed .
33,347
/**
 * Gets the destination file used for log syncing after a journal node
 * crash: the finalized edits file for the given transaction range.
 */
File getSyncLogDestFile(long segmentTxId, long endTxId) {
  String fileName = NNStorage.getFinalizedEditsFileName(segmentTxId, endTxId);
  return new File(sd.getCurrentDir(), fileName);
}
Get name for destination file used for log syncing after a journal node crashed .
33,348
/**
 * Removes log files older than the given transaction id. For edits
 * directories, the associated paxos files are purged as well.
 */
void purgeDataOlderThan(long minTxIdToKeep) throws IOException {
  File currentDir = sd.getCurrentDir();
  if (isImageDir) {
    purgeMatching(currentDir, IMAGE_CURRENT_DIR_PURGE_REGEXES, minTxIdToKeep);
    return;
  }
  purgeMatching(currentDir, EDITS_CURRENT_DIR_PURGE_REGEXES, minTxIdToKeep);
  purgeMatching(getPaxosDir(), PAXOS_DIR_PURGE_REGEXES, minTxIdToKeep);
}
Remove any log files and associated paxos files which are older than the given txid .
33,349
protected void writeInputData ( long genbytes , Path ioPath ) throws IOException , InterruptedException { final Configuration conf = getConf ( ) ; final GridmixJob genData = new GenerateData ( conf , ioPath , genbytes ) ; submitter . add ( genData ) ; LOG . info ( "Generating " + StringUtils . humanReadableInt ( genbytes ) + " of test data..." ) ; TimeUnit . SECONDS . sleep ( 10 ) ; try { genData . getJob ( ) . waitForCompletion ( false ) ; } catch ( ClassNotFoundException e ) { throw new IOException ( "Internal error" , e ) ; } if ( ! genData . getJob ( ) . isSuccessful ( ) ) { throw new IOException ( "Data generation failed!" ) ; } LOG . info ( "Done." ) ; }
Write random bytes at the path provided .
33,350
private void startThreads ( Configuration conf , String traceIn , Path ioPath , Path scratchDir , CountDownLatch startFlag ) throws IOException { monitor = createJobMonitor ( ) ; submitter = createJobSubmitter ( monitor , conf . getInt ( GRIDMIX_SUB_THR , Runtime . getRuntime ( ) . availableProcessors ( ) + 1 ) , conf . getInt ( GRIDMIX_QUE_DEP , 5 ) , new FilePool ( conf , ioPath ) ) ; factory = createJobFactory ( submitter , traceIn , scratchDir , conf , startFlag ) ; monitor . start ( ) ; submitter . start ( ) ; factory . start ( ) ; }
Create each component in the pipeline and start it .
33,351
ClusterStatus generateClusterHealthReport ( ) { ClusterStatus cs = new ClusterStatus ( ) ; List < InetSocketAddress > isas = null ; ArrayList < String > suffixes = null ; if ( isAvatar ) { suffixes = new ArrayList < String > ( ) ; suffixes . add ( "0" ) ; suffixes . add ( "1" ) ; } try { cs . nnAddrs = isas = DFSUtil . getClientRpcAddresses ( conf , suffixes ) ; } catch ( Exception e ) { cs . setError ( e ) ; LOG . error ( e ) ; return cs ; } sort ( isas ) ; NameNodeStatusFetcher [ ] threads = new NameNodeStatusFetcher [ isas . size ( ) ] ; for ( int i = 0 ; i < isas . size ( ) ; i ++ ) { threads [ i ] = new NameNodeStatusFetcher ( isas . get ( i ) ) ; threads [ i ] . start ( ) ; } for ( NameNodeStatusFetcher thread : threads ) { try { thread . join ( ) ; if ( thread . e != null ) { cs . addException ( thread . isa . toString ( ) , thread . e ) ; } cs . addNamenodeStatus ( thread . nn ) ; } catch ( InterruptedException ex ) { LOG . warn ( ex ) ; } } return cs ; }
JSP helper function that generates cluster health report . When encountering exception while getting Namenode status the exception will be listed in the page with corresponding stack trace .
33,352
DecommissionStatus generateDecommissioningReport ( ) { List < InetSocketAddress > isas = null ; ArrayList < String > suffixes = null ; if ( isAvatar ) { suffixes = new ArrayList < String > ( ) ; suffixes . add ( "0" ) ; suffixes . add ( "1" ) ; } try { isas = DFSUtil . getClientRpcAddresses ( conf , suffixes ) ; sort ( isas ) ; } catch ( Exception e ) { DecommissionStatus dInfo = new DecommissionStatus ( e ) ; LOG . error ( e ) ; return dInfo ; } Map < String , Map < String , String > > statusMap = new HashMap < String , Map < String , String > > ( ) ; Map < String , Exception > decommissionExceptions = new HashMap < String , Exception > ( ) ; List < String > unreportedNamenode = new ArrayList < String > ( ) ; DecommissionStatusFetcher [ ] threads = new DecommissionStatusFetcher [ isas . size ( ) ] ; for ( int i = 0 ; i < isas . size ( ) ; i ++ ) { threads [ i ] = new DecommissionStatusFetcher ( isas . get ( i ) , statusMap ) ; threads [ i ] . start ( ) ; } for ( DecommissionStatusFetcher thread : threads ) { try { thread . join ( ) ; if ( thread . e != null ) { decommissionExceptions . put ( thread . isa . toString ( ) , thread . e ) ; unreportedNamenode . add ( thread . isa . toString ( ) ) ; } } catch ( InterruptedException ex ) { LOG . warn ( ex ) ; } } updateUnknownStatus ( statusMap , unreportedNamenode ) ; getDecommissionNodeClusterState ( statusMap ) ; return new DecommissionStatus ( statusMap , isas , getDatanodeHttpPort ( conf ) , decommissionExceptions ) ; }
Helper function that generates the decommissioning report .
33,353
/**
 * Marks every datanode's status as UNKNOWN for each NameNode that failed
 * to report, so the decommission table shows an explicit placeholder
 * instead of missing data.
 *
 * @param statusMap    datanode -> (namenode -> decommission state); values
 *                     are mutated in place
 * @param unreportedNn NameNodes whose status fetch failed; may be null
 */
private void updateUnknownStatus(Map<String, Map<String, String>> statusMap, List<String> unreportedNn) {
  if (unreportedNn == null || unreportedNn.isEmpty()) {
    return;
  }
  for (Map<String, String> nnStatus : statusMap.values()) {
    for (String nn : unreportedNn) {
      nnStatus.put(nn, DecommissionStates.UNKNOWN.toString());
    }
    // The map value is mutated in place; the old code's re-insertion of
    // the same reference into statusMap was a no-op and has been dropped.
  }
}
update unknown status in datanode status map for every unreported namenode
33,354
/**
 * Gets the datanode HTTP port from configuration.
 *
 * @param conf configuration holding "dfs.datanode.http.address"
 * @return the port portion of the address, or -1 if the address is unset
 *         or malformed (the old code threw on a missing ':' or a
 *         non-numeric port)
 */
private int getDatanodeHttpPort(Configuration conf) {
  String address = conf.get("dfs.datanode.http.address", "");
  int sep = address.indexOf(':');
  if (sep < 0) {
    // Unset or host-only address: no port to report.
    return -1;
  }
  try {
    return Integer.parseInt(address.substring(sep + 1));
  } catch (NumberFormatException e) {
    // Malformed port; treat the same as an unset address.
    return -1;
  }
}
Get the datanode HTTP port from the configuration.
33,355
/**
 * Reads the job configuration and parses the job history file to populate
 * job statistics information.
 *
 * @throws Exception if the history file URL protocol is neither "hdfs"
 *         nor "file"
 */
private void readJobInformation(JobConf jobConf, JobInfo jobInfo) throws Exception {
  URL jobConfFileUrl = new URL(this._jobConfFile);
  URL jobHistoryFileUrl = new URL(this._jobHistoryFile);
  jobConf.addResource(jobConfFileUrl);
  // Choose the filesystem matching the history file's URL scheme.
  if (jobHistoryFileUrl.getProtocol().equals("hdfs")) {
    DefaultJobHistoryParser.parseJobTasks(jobHistoryFileUrl.getPath(), jobInfo, FileSystem.get(jobConf));
  } else if (jobHistoryFileUrl.getProtocol().equals("file")) {
    DefaultJobHistoryParser.parseJobTasks(jobHistoryFileUrl.getPath(), jobInfo, FileSystem.getLocal(jobConf));
  } else {
    throw new Exception("Malformed URL. Protocol: " + jobHistoryFileUrl.getProtocol());
  }
}
read and populate job statistics information .
33,356
/**
 * Computes the average map progress for one progress bar: the mean
 * progress of up to tasksPerBar reports starting at index.
 *
 * @return the average progress, or 0 when index is past the end of the
 *         reports (the old code divided 0 by 0, yielding NaN)
 */
private float getMapAvarageProgress(int tasksPerBar, int index, TaskReport[] reports) {
  float progress = 0f;
  int k = 0;
  for (; k < tasksPerBar && index + k < reports.length; k++) {
    progress += reports[index + k].getProgress();
  }
  // Guard against 0/0 -> NaN when no reports fall in this bar.
  return k == 0 ? 0f : progress / k;
}
Computes average progress per bar
33,357
/**
 * Computes average progresses per bar for reduce tasks, split into three
 * thirds of the overall progress (presumably the copy/sort/reduce phases
 * -- TODO confirm).
 *
 * @return an array of three per-third averages
 */
private float[] getReduceAvarageProgresses(int tasksPerBar, int index, TaskReport[] reports) {
  float[] progresses = new float[] { 0, 0, 0 };
  int k = 0;
  for (; k < tasksPerBar && index + k < reports.length; k++) {
    float progress = reports[index + k].getProgress();
    // Spread this task's progress across the thirds: each fully completed
    // third contributes 1.0, a partially completed third contributes the
    // completed fraction of that third (progress * 3).
    for (int j = 0; progress > 0; j++, progress -= oneThird) {
      if (progress > oneThird)
        progresses[j] += 1f;
      else
        progresses[j] += progress * 3;
    }
  }
  // NOTE(review): k can be 0 if index is past the end, making these
  // divisions 0/0 -> NaN; confirm callers never pass such an index.
  for (int j = 0; j < 3; j++) {
    progresses[j] /= k;
  }
  return progresses;
}
Computes average progresses per bar
33,358
/**
 * Sets up clients before each benchmark: direct RPC clients straight to
 * the NameNode, Thrift and RPC clients to the proxy, and the benchmark
 * root directory. On any failure, tears down whatever was already
 * created before rethrowing.
 */
private void setUp() throws Exception {
  try {
    fileSystem = (DistributedFileSystem) FileSystem.get(StorageServiceConfigKeys.translateToOldSchema(conf, nameserviceId), conf);
    InetSocketAddress nameNodeAddr = fileSystem.getClient().getNameNodeAddr();
    metaInfo = new RequestMetaInfo(clusterId, nameserviceId, RequestMetaInfo.NO_NAMESPACE_ID, RequestMetaInfo.NO_APPLICATION_ID, (UnixUserGroupInformation) UserGroupInformation.getUGI(this.conf));
    // Direct (non-proxied) protocols straight to the NameNode.
    directClientProtocol = RPC.getProxy(ClientProtocol.class, ClientProtocol.versionID, nameNodeAddr, conf);
    directClientProxyProtocol = RPC.getProxy(ClientProxyProtocol.class, ClientProxyProtocol.versionID, nameNodeAddr, conf);
    // Thrift client to the proxy.
    clientManager = new ThriftClientManager();
    FramedClientConnector connector = new FramedClientConnector(HostAndPort.fromParts(proxyHostname, proxyPortThrift));
    proxyTClientProxyProtocol = clientManager.createClient(connector, TClientProxyProtocol.class).get();
    // RPC client to the proxy.
    proxyClientProxyProtocol = RPC.getProxy(ClientProxyProtocol.class, ClientProxyProtocol.versionID, new InetSocketAddress(proxyHostname, proxyPortRPC), conf);
    fileSystem.mkdirs(new Path(ROOT));
  } catch (Exception e) {
    // Clean up partially-initialized clients before propagating.
    tearDown();
    throw e;
  }
}
Sets up clients before each benchmark
33,359
/**
 * Tears down clients after each benchmark: deletes the benchmark root
 * directory, then closes every client resource even if the delete fails.
 */
private void tearDown() throws Exception {
  try {
    if (fileSystem != null) {
      fileSystem.delete(new Path(ROOT), true, true);
    }
  } finally {
    // Always release the clients, regardless of the delete outcome.
    RPC.stopProxy(proxyClientProxyProtocol);
    IOUtils.cleanup(LOG, proxyTClientProxyProtocol, clientManager, fileSystem);
  }
}
Tears down clients after each benchmark
33,360
/**
 * Normalizes a path string: collapses doubled forward slashes, converts
 * backslashes to forward slashes, and drops a single trailing slash
 * (unless the whole path is "/").
 *
 * This method is copied from Path.
 */
public static String normalizePath(String path) {
  String normalized = path.replace("//", "/").replace("\\", "/");
  int len = normalized.length();
  boolean hasTrailingSlash = len > 1 && normalized.charAt(len - 1) == '/';
  return hasTrailingSlash ? normalized.substring(0, len - 1) : normalized;
}
This method is copied from Path .
33,361
/**
 * Parses a shard string of the form "version@dir@gen".
 * Assumes str is formatted correctly as a shard string.
 */
private static Shard createShardFromString(String str) {
  int firstAt = str.indexOf("@");
  int secondAt = str.indexOf("@", firstAt + 1);
  String versionPart = str.substring(0, firstAt);
  String dirPart = str.substring(firstAt + 1, secondAt);
  String genPart = str.substring(secondAt + 1);
  return new Shard(Long.parseLong(versionPart), dirPart, Long.parseLong(genPart));
}
assume str is formatted correctly as a shard string
33,362
/**
 * Compares to another shard: by version, then directory, then generation.
 */
public int compareTo(Shard other) {
  if (version != other.version) {
    return version < other.version ? -1 : 1;
  }
  int dirOrder = dir.compareTo(other.dir);
  if (dirOrder != 0) {
    return dirOrder;
  }
  if (gen == other.gen) {
    return 0;
  }
  return gen < other.gen ? -1 : 1;
}
Compare to another shard .
33,363
/**
 * Gets the Unix command for setting the maximum virtual memory available
 * to a forked child process (ulimit -v). Relevant when a process is
 * forked from within a Mapper or Reducer, e.g. Hadoop Pipes/Streaming.
 *
 * @return the command tokens, or null on Windows where ulimit does not
 *         exist
 */
public static String[] getUlimitMemoryCommand(int memoryLimit) {
  if (WINDOWS) {
    // No ulimit equivalent on Windows.
    return null;
  }
  String limit = String.valueOf(memoryLimit);
  return new String[] { ULIMIT_COMMAND, "-v", limit };
}
Get the Unix command for setting the maximum virtual memory available to a given child process . This is only relevant when we are forking a process from within the Mapper or the Reducer implementations . Also see Hadoop Pipes and Hadoop Streaming .
33,364
/**
 * Appends this task id's unique string form to the given builder:
 * jobId, separator, 'm' or 'r', separator, zero-padded id.
 */
protected StringBuilder appendTo(StringBuilder builder) {
  StringBuilder sb = jobId.appendTo(builder);
  sb.append(SEPARATOR);
  sb.append(isMap ? 'm' : 'r');
  sb.append(SEPARATOR);
  return sb.append(idFormat.format(id));
}
Add the unique string to the given builder .
33,365
/**
 * Constructs a TaskID object from the given string of the form
 * "task_<jobid>_<taskid>_<m|r>_<id>".
 *
 * @return the parsed TaskID, or null when str is null
 * @throws IllegalArgumentException if str is not a well-formed task id
 *
 * The old code drove the failure path by throwing a raw {@code new
 * Exception()} and swallowing it in an empty catch; this version checks
 * the parts directly and only catches NumberFormatException.
 */
public static TaskID forName(String str) throws IllegalArgumentException {
  if (str == null)
    return null;
  String[] parts = str.split("_");
  if (parts.length == 5 && parts[0].equals(TASK)
      && (parts[3].equals("m") || parts[3].equals("r"))) {
    try {
      boolean isMap = parts[3].equals("m");
      return new org.apache.hadoop.mapred.TaskID(parts[1], Integer.parseInt(parts[2]), isMap, Integer.parseInt(parts[4]));
    } catch (NumberFormatException ignored) {
      // Non-numeric job/task number: fall through to the error below.
    }
  }
  throw new IllegalArgumentException("TaskId string : " + str + " is not properly formed");
}
Construct a TaskID object from given string
33,366
/**
 * Reads a string written in the legacy UTF8 wire format; the deprecated
 * UTF8 class is retained because data written by other code still uses
 * that encoding.
 */
@SuppressWarnings("deprecation")
public static String readString(DataInputStream in) throws IOException {
  // Reuse the thread-local UTF8 instance to avoid per-call allocation.
  UTF8 ustr = TL_DATA.get().U_STR;
  ustr.readFields(in);
  return ustr.toString();
}
by other code .
33,367
/**
 * Periodically checks the status of running map-reduce jobs: retires
 * completed jobs to history, kills jobs that have run longer than
 * maximumRunningTime, and accumulates counters for finished jobs.
 * Loops until the 'running' flag is cleared, sleeping
 * jobMonitorInterval ms between passes.
 */
public void doMonitor() {
  while (running) {
    String[] keys = null;
    // Snapshot the key set so we do not hold the jobs lock while polling.
    synchronized (jobs) {
      keys = jobs.keySet().toArray(new String[0]);
    }
    Map<String, List<DistRaid>> finishedJobs = new HashMap<String, List<DistRaid>>();
    for (String key : keys) {
      DistRaid[] jobListCopy = null;
      // Copy the per-key job list so iteration happens without its lock.
      synchronized (jobs) {
        List<DistRaid> jobList = jobs.get(key);
        synchronized (jobList) {
          jobListCopy = jobList.toArray(new DistRaid[jobList.size()]);
        }
      }
      for (DistRaid job : jobListCopy) {
        try {
          boolean complete = job.checkComplete();
          if (complete) {
            addJob(finishedJobs, key, job);
            if (job.successful()) {
              jobsSucceeded++;
            }
          } else if (System.currentTimeMillis() - job.getStartTime() > maximumRunningTime) {
            // Treat a hanging job as failed; the catch below kills it.
            throw new Exception("Job " + job.getJobID() + " is hanging more than " + maximumRunningTime / 1000 + " seconds. Kill it");
          }
        } catch (Exception e) {
          // Any error (including the hang above) finishes and kills the job.
          addJob(finishedJobs, key, job);
          try {
            job.killJob();
          } catch (Exception ee) {
            LOG.error(ee);
          }
        }
      }
    }
    if (finishedJobs.size() > 0) {
      for (String key : finishedJobs.keySet()) {
        List<DistRaid> finishedJobList = finishedJobs.get(key);
        for (DistRaid job : finishedJobList) {
          // Record counters, move the job from the active table to
          // history, and release its resources.
          addCounter(raidProgress, job, INT_CTRS);
          removeJob(jobs, key, job);
          addJob(history, key, job);
          job.cleanUp();
        }
      }
    }
    try {
      Thread.sleep(jobMonitorInterval);
    } catch (InterruptedException ie) {
    }
  }
}
Periodically checks status of running map - reduce jobs .
33,368
/**
 * Returns the total number of tracked DistRaid jobs across all keys.
 * For test code.
 */
int runningJobsCount() {
  int count = 0;
  synchronized (jobs) {
    for (List<DistRaid> jobList : jobs.values()) {
      count += jobList.size();
    }
  }
  return count;
}
For test code
33,369
/**
 * Command-line entry point: dumps TFile information for each path given
 * on the command line.
 */
public static void main(String[] args) {
  System.out.printf("TFile Dumper (TFile %s, BCFile %s)\n", TFile.API_VERSION.toString(), BCFile.API_VERSION.toString());
  if (args.length == 0) {
    // No files given: print usage and exit successfully.
    System.out.println("Usage: java ... org.apache.hadoop.io.file.tfile.TFile tfile-path [tfile-path ...]");
    System.exit(0);
  }
  Configuration conf = new Configuration();
  for (String file : args) {
    System.out.println("===" + file + "===");
    try {
      TFileDumper.dumpInfo(file, System.out, conf);
    } catch (IOException e) {
      // Keep dumping the remaining files even if one fails.
      e.printStackTrace(System.err);
    }
  }
}
Dumping the TFile information .
33,370
/**
 * Collects, for each task attempt in the given list, the per-LogName
 * log file details.
 */
private Map<Task, Map<LogName, LogFileDetail>> getAllLogsFileDetails(final List<Task> allAttempts) throws IOException {
  Map<Task, Map<LogName, LogFileDetail>> detailsByTask = new HashMap<Task, Map<LogName, LogFileDetail>>();
  for (Task task : allAttempts) {
    detailsByTask.put(task, TaskLog.getAllLogsFileDetails(task.getTaskID(), task.isTaskCleanupTask()));
  }
  return detailsByTask;
}
Get the logFileDetails of all the list of attempts passed .
33,371
/**
 * Checks whether truncation of logs is needed for the given JVM info: if
 * every attempt that ran in the JVM is within its log limit, no
 * truncation is needed; otherwise it is.
 *
 * The old code carried a flag and an outer, mostly-dead logFileDetail
 * variable; this version returns directly from the loop.
 */
private boolean isTruncationNeeded(PerJVMInfo lInfo, Map<Task, Map<LogName, LogFileDetail>> taskLogFileDetails, LogName logName) {
  for (Task task : lInfo.allAttempts) {
    long taskRetainSize = task.isMapTask() ? mapRetainSize : reduceRetainSize;
    LogFileDetail logFileDetail = taskLogFileDetails.get(task).get(logName);
    // A retain size at or below the minimum disables truncation for the task.
    if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION && logFileDetail.length > taskRetainSize) {
      return true;
    }
  }
  return false;
}
Check if truncation of logs is needed for the given jvmInfo . If all the tasks that ran in a JVM are within the log - limits then truncation is not needed . Otherwise it is needed .
33,372
/**
 * Truncates one log file of a task attempt so that only the last
 * taskRetainSize chars are retained: skips the leading portion in
 * logFileReader and copies the retained tail into tmpFileWriter.
 *
 * @return the LogFileDetail describing the (possibly truncated) new log
 * @throws IOException if skipping the leading portion fails
 */
private LogFileDetail truncateALogFileOfAnAttempt(final TaskAttemptID taskID, final LogFileDetail oldLogFileDetail, final long taskRetainSize, final FileWriter tmpFileWriter, final FileReader logFileReader) throws IOException {
  LogFileDetail newLogFileDetail = new LogFileDetail();
  newLogFileDetail.location = oldLogFileDetail.location;
  if (taskRetainSize > MINIMUM_RETAIN_SIZE_FOR_TRUNCATION && oldLogFileDetail.length > taskRetainSize) {
    LOG.info("Truncating logs for " + taskID + " from " + oldLogFileDetail.length + "bytes to " + taskRetainSize + "bytes.");
    newLogFileDetail.length = taskRetainSize;
  } else {
    LOG.info("No truncation needed for " + taskID + " length is " + oldLogFileDetail.length + " retain size " + taskRetainSize + "bytes.");
    newLogFileDetail.length = oldLogFileDetail.length;
  }
  // Skip everything before the retained tail.
  long charsSkipped = logFileReader.skip(oldLogFileDetail.length - newLogFileDetail.length);
  if (charsSkipped != oldLogFileDetail.length - newLogFileDetail.length) {
    throw new IOException("Erroneously skipped " + charsSkipped + " instead of the expected " + (oldLogFileDetail.length - newLogFileDetail.length));
  }
  // Copy the retained tail in chunks, capping the final chunk so we never
  // read past this attempt's region of the (shared) log file.
  long alreadyRead = 0;
  while (alreadyRead < newLogFileDetail.length) {
    long remaining = newLogFileDetail.length - alreadyRead;
    char tmpBuf[];
    if (remaining >= DEFAULT_BUFFER_SIZE) {
      tmpBuf = new char[DEFAULT_BUFFER_SIZE];
    } else {
      tmpBuf = new char[(int) remaining];
    }
    int charsRead = logFileReader.read(tmpBuf);
    if (charsRead < 0) {
      // Unexpected EOF; keep whatever was copied so far.
      break;
    }
    alreadyRead += charsRead;
    // BUGFIX: write only the chars actually read. The old code wrote the
    // whole buffer, so a short read appended stale/NUL chars to the
    // truncated log.
    tmpFileWriter.write(tmpBuf, 0, charsRead);
  }
  return newLogFileDetail;
}
Truncate the log file of this task - attempt so that only the last retainSize many bytes of each log file is retained and the log file is reduced in size saving disk space .
33,373
/**
 * After log truncation, syncs the per-task index files so they reflect
 * the truncated log offsets and sizes.
 */
private void updateIndicesAfterLogTruncation(TaskAttemptID firstAttempt, Map<Task, Map<LogName, LogFileDetail>> updatedTaskLogFileDetails) {
  for (Entry<Task, Map<LogName, LogFileDetail>> entry : updatedTaskLogFileDetails.entrySet()) {
    Task task = entry.getKey();
    Map<LogName, LogFileDetail> logFileDetails = entry.getValue();
    // logName -> [start offset, end offset] within the shared log file.
    Map<LogName, Long[]> logLengths = new HashMap<LogName, Long[]>();
    for (LogName logName : TaskLog.LOGS_TRACKED_BY_INDEX_FILES) {
      logLengths.put(logName, new Long[] { Long.valueOf(0L), Long.valueOf(0L) });
      LogFileDetail lfd = logFileDetails.get(logName);
      if (lfd != null) {
        logLengths.get(logName)[0] = Long.valueOf(lfd.start);
        logLengths.get(logName)[1] = Long.valueOf(lfd.start + lfd.length);
      }
    }
    try {
      TaskLog.writeToIndexFile(firstAttempt, task.getTaskID(), task.isTaskCleanupTask(), logLengths);
    } catch (IOException ioe) {
      // Best effort: log and continue with the other tasks' index files.
      LOG.warn("Exception in updateIndicesAfterLogTruncation : " + StringUtils.stringifyException(ioe));
      LOG.warn("Exception encountered while updating index file of task " + task.getTaskID() + ". Ignoring and continuing with other tasks.");
    }
  }
}
Truncation of logs is done . Now sync the index files to reflect the truncated sizes .
33,374
/**
 * Builds a JSON string of per-split-type statistics: counts, sizes,
 * block/host totals, percentages relative to the ALL split type, and
 * per-split averages.
 */
private String getStatsString() {
  SplitTypeStats allTypeStats = splitTypeStatsMap.get(SplitType.ALL);
  Map<String, Map<String, Number>> statsMapMap = new HashMap<String, Map<String, Number>>();
  for (Map.Entry<SplitType, SplitTypeStats> entry : splitTypeStatsMap.entrySet()) {
    Map<String, Number> statsMap = new HashMap<String, Number>();
    statsMapMap.put(entry.getKey().toString(), statsMap);
    // Percentages are relative to the aggregate (ALL) totals.
    float percentTotalSplitCount = (100f * entry.getValue().getTotalSplitCount()) / allTypeStats.getTotalSplitCount();
    float percentTotalSize = (100f * entry.getValue().getTotalSize()) / allTypeStats.getTotalSize();
    float percentTotalBlockCount = (100f * entry.getValue().getTotalBlockCount()) / allTypeStats.getTotalBlockCount();
    float averageSizePerSplit = ((float) entry.getValue().getTotalSize()) / entry.getValue().getTotalSplitCount();
    float averageHostCountPerSplit = ((float) entry.getValue().getTotalHostCount()) / entry.getValue().getTotalSplitCount();
    float averageBlockCountPerSplit = ((float) entry.getValue().getTotalBlockCount()) / entry.getValue().getTotalSplitCount();
    statsMap.put("totalSplitCount", entry.getValue().getTotalSplitCount());
    statsMap.put("percentTotalSplitCount", percentTotalSplitCount);
    statsMap.put("totalSize", entry.getValue().getTotalSize());
    statsMap.put("percentTotalSize", percentTotalSize);
    statsMap.put("averageSizePerSplit", averageSizePerSplit);
    statsMap.put("totalHostCount", entry.getValue().getTotalHostCount());
    statsMap.put("averageHostCountPerSplit", averageHostCountPerSplit);
    statsMap.put("totalBlockCount", entry.getValue().getTotalBlockCount());
    statsMap.put("percentTotalBlockCount", percentTotalBlockCount);
    statsMap.put("averageBlockCountPerSplit", averageBlockCountPerSplit);
  }
  return JSON.toString(statsMapMap);
}
Get stats for every split type as a string
33,375
/**
 * Creates a new pool and adds the filters to it. A split cannot have
 * files from different pools.
 *
 * NOTE(review): the conf parameter is unused here; presumably kept for
 * API compatibility — confirm before removing.
 */
protected void createPool(JobConf conf, List<PathFilter> filters) {
  pools.add(new MultiPathFilter(filters));
}
Create a new pool and add the filters to it . A split cannot have files from different pools .
33,376
/**
 * Sorts each node's block list by size, largest to smallest, using
 * OneBlockInfoSizeComparator.
 */
private void sortBlocksBySize(Map<String, List<OneBlockInfo>> nodeToBlocks) {
  OneBlockInfoSizeComparator bySize = new OneBlockInfoSizeComparator();
  for (List<OneBlockInfo> blocks : nodeToBlocks.values()) {
    Collections.sort(blocks, bySize);
  }
}
Sort the blocks on each node by size largest to smallest
33,377
/**
 * Called periodically by JobTrackerMetrics to update the metrics for this
 * job: publishes every counter value, tagged with its group and counter
 * display names.
 */
public void updateMetrics() {
  Counters counters = getCounters();
  for (Counters.Group group : counters) {
    jobMetrics.setTag("group", group.getDisplayName());
    for (Counters.Counter counter : group) {
      jobMetrics.setTag("counter", counter.getDisplayName());
      jobMetrics.setMetric("value", (float) counter.getCounter());
      jobMetrics.update();
    }
  }
}
Called periodically by JobTrackerMetrics to update the metrics for this job .
33,378
/**
 * Gets all the tasks of the desired type in this job; an unrecognized
 * type yields an empty array.
 */
TaskInProgress[] getTasks(TaskType type) {
  switch (type) {
  case MAP:
    return maps;
  case REDUCE:
    return reduces;
  case JOB_SETUP:
    return setup;
  case JOB_CLEANUP:
    return cleanup;
  default:
    return new TaskInProgress[0];
  }
}
Get all the tasks of the desired type in this job .
33,379
/**
 * Checks whether the cleanup task can be launched for the job: setup must
 * have finished, and either the job was killed/failed or its tasks are
 * done (only maps for a map-only job; only reduces when
 * jobFinishWhenReducesDone is set).
 */
private synchronized boolean canLaunchJobCleanupTask() {
  // Cleanup only applies while the job is still RUNNING or PREP.
  if (status.getRunState() != JobStatus.RUNNING && status.getRunState() != JobStatus.PREP) {
    return false;
  }
  // Launch at most once, and never before setup completes.
  if (launchedCleanup || !isSetupFinished()) {
    return false;
  }
  // A killed or failed job is cleaned up immediately.
  if (jobKilled || jobFailed) {
    return true;
  }
  boolean mapsDone = ((finishedMapTasks + failedMapTIPs) == (numMapTasks));
  boolean reducesDone = ((finishedReduceTasks + failedReduceTIPs) == numReduceTasks);
  boolean mapOnlyJob = (numReduceTasks == 0);
  if (mapOnlyJob) {
    return mapsDone;
  }
  if (jobFinishWhenReducesDone) {
    return reducesDone;
  }
  return mapsDone && reducesDone;
}
Check whether cleanup task can be launched for the job .
33,380
/**
 * Checks whether the setup task can be launched: tasks are initialized,
 * the job is still in PREP, and setup has not already been launched nor
 * the job killed/failed.
 */
private synchronized boolean canLaunchSetupTask() {
  return (tasksInited.get() && status.getRunState() == JobStatus.PREP && !launchedSetup && !jobKilled && !jobFailed);
}
Check whether setup task can be launched for the job .
33,381
/**
 * Returns a ReduceTask, if appropriate, to run on the given tasktracker.
 * There is no cache-sensitivity for reduce tasks, as they work on
 * temporary MapRed files.
 *
 * @return the task to run, or null if none should be scheduled now
 */
public synchronized Task obtainNewReduceTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts) throws IOException {
  if (status.getRunState() != JobStatus.RUNNING) {
    LOG.info("Cannot create task split for " + profile.getJobID());
    return null;
  }
  // scheduleReduces() gates reduce scheduling — presumably until enough
  // maps have completed (slow start); confirm against its definition.
  if (!scheduleReduces()) {
    return null;
  }
  int target = findNewReduceTask(tts, clusterSize, numUniqueHosts);
  if (target == -1) {
    return null;
  }
  Task result = reduces[target].getTaskToRun(tts.getTrackerName());
  if (result != null) {
    addRunningTaskToTIP(reduces[target], result.getTaskID(), tts, true);
  }
  return result;
}
Return a ReduceTask, if appropriate, to run on the given tasktracker. We don't have cache-sensitivity for reduce tasks, as they work on temporary MapRed files.
33,382
/**
 * Gets the trackers blacklisted for this job — those with at least
 * maxTaskFailuresPerTracker recorded failures — along with their error
 * lists.
 */
Map<String, List<String>> getBlackListedTrackers() {
  Map<String, List<String>> blacklisted = new HashMap<String, List<String>>();
  for (Map.Entry<String, List<String>> entry : trackerToFailuresMap.entrySet()) {
    List<String> failures = entry.getValue();
    if (failures.size() >= maxTaskFailuresPerTracker) {
      blacklisted.put(entry.getKey(), failures);
    }
  }
  return blacklisted;
}
Get the black listed trackers for the job and corresponding errors .
33,383
/**
 * Gets a snapshot, sorted by tracker name, of the per-tasktracker errors
 * recorded for this job.
 */
synchronized Map<String, List<String>> getTaskTrackerErrors() {
  // Copy into a TreeMap so callers get a stable, name-ordered snapshot.
  return new TreeMap<String, List<String>>(trackerToFailuresMap);
}
Get the information on tasktrackers and the number of errors that occurred on them for a given job.
33,384
/**
 * Adds a map tip to the list of running maps: registers it in the
 * running-map cache under every node hosting a split replica (and each
 * ancestor up to maxLevel), or in the non-local list when the split has
 * no locations.
 */
protected synchronized void scheduleMap(TaskInProgress tip) {
  // Seed progress statistics for the newly-running task.
  runningMapTaskStats.add(0.0f);
  runningTaskMapByteProcessingRateStats.add(0.0f);
  if (runningMapCache == null) {
    LOG.warn("Running cache for maps is missing!! " + "Job details are missing.");
    return;
  }
  String[] splitLocations = tip.getSplitLocations();
  // A split with no locations can only be tracked as non-local.
  if (splitLocations.length == 0) {
    nonLocalRunningMaps.add(tip);
    return;
  }
  for (String host : splitLocations) {
    Node node = jobtracker.getNode(host);
    // Register the tip at every topology level up to maxLevel.
    for (int j = 0; j < maxLevel; ++j) {
      Set<TaskInProgress> hostMaps = runningMapCache.get(node);
      if (hostMaps == null) {
        hostMaps = new LinkedHashSet<TaskInProgress>();
        runningMapCache.put(node, hostMaps);
      }
      hostMaps.add(tip);
      node = node.getParent();
    }
  }
}
Adds a map tip to the list of running maps .
33,385
/**
 * Adds a reduce tip to the list of running reduces and seeds its
 * per-phase (copy/sort/reduce) progress statistics.
 */
protected synchronized void scheduleReduce(TaskInProgress tip) {
  runningReduceTaskStats.add(0.0f);
  runningTaskCopyProcessingRateStats.add(0.0f);
  runningTaskSortProcessingRateStats.add(0.0f);
  runningTaskReduceProcessingRateStats.add(0.0f);
  if (runningReduces == null) {
    LOG.warn("Running cache for reducers missing!! " + "Job details are missing.");
    return;
  }
  runningReduces.add(tip);
}
Adds a reduce tip to the list of running reduces
33,386
/**
 * Finds a new reduce task for the given tasktracker: first a non-running
 * reduce, then (if enabled) a speculative attempt of a running one.
 *
 * @return the index of the chosen reduce tip, or -1 if none is suitable
 */
private synchronized int findNewReduceTask(TaskTrackerStatus tts, int clusterSize, int numUniqueHosts) {
  if (numReduceTasks == 0) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("No reduces to schedule for " + profile.getJobID());
    }
    return -1;
  }
  String taskTracker = tts.getTrackerName();
  TaskInProgress tip = null;
  this.clusterSize = clusterSize;
  // Skip trackers that have failed too many of this job's tasks.
  if (!shouldRunOnTaskTracker(taskTracker)) {
    return -1;
  }
  // Require enough free disk for the estimated reduce input plus a
  // configurable safety buffer (default 300 MB).
  long outSize = resourceEstimator.getEstimatedReduceInputSize();
  long availSpace = tts.getResourceStatus().getAvailableSpace();
  final long SAVETY_BUFFER = conf.getLong("mapred.reduce.reserved.disk.mb", 300) * 1024 * 1024;
  if (availSpace < outSize + SAVETY_BUFFER) {
    LOG.warn("No room for reduce task. Node " + taskTracker + " has " + availSpace + " bytes free; The safty buffer is " + SAVETY_BUFFER + " bytes; but we expect map to take " + outSize);
    return -1;
  }
  tip = findTaskFromList(nonRunningReduces, tts, numUniqueHosts, false);
  if (tip != null) {
    scheduleReduce(tip);
    return tip.getIdWithinJob();
  }
  // Fall back to speculating an already-running reduce.
  if (hasSpeculativeReduces) {
    tip = getSpeculativeReduce(tts.getTrackerName(), tts.getHost());
    if (tip != null) {
      scheduleReduce(tip);
      return tip.getIdWithinJob();
    }
  }
  return -1;
}
Find new reduce task
33,387
/**
 * Kills the job and all its component tasks. Called from the jobtracker
 * and must return fast, as it locks the jobtracker.
 *
 * If task initialization is in flight, only the kill flag is set here;
 * the initializing thread performs the actual termination when it
 * observes the flag.
 */
public void kill() {
  boolean killNow = false;
  synchronized (jobInitKillStatus) {
    jobInitKillStatus.killed = true;
    // Terminate immediately only when init has not started or is done.
    if (!jobInitKillStatus.initStarted || jobInitKillStatus.initDone) {
      killNow = true;
    }
  }
  if (killNow) {
    terminate(JobStatus.KILLED);
  }
}
Kill the job and all its component tasks . This method should be called from jobtracker and should return fast as it locks the jobtracker .
33,388
/**
 * The job is dead: cancels reservations, returns speculative-task counts
 * to the global counters, persists the completed job, deletes its local
 * and system-directory files, and drops the scheduling caches so this
 * job's memory can be reclaimed.
 */
synchronized void garbageCollect() {
  cancelReservedSlots();
  // Return this job's speculative tasks to the cluster-wide counters.
  totalSpeculativeReduceTasks.addAndGet(-speculativeReduceTasks);
  totalSpeculativeMapTasks.addAndGet(-speculativeMapTasks);
  garbageCollected = true;
  jobtracker.getInstrumentation().decWaitingMaps(getJobID(), pendingMaps());
  jobtracker.getInstrumentation().decWaitingReduces(getJobID(), pendingReduces());
  jobtracker.storeCompletedJob(this);
  jobtracker.finalizeJob(this);
  try {
    if (localJobFile != null) {
      localFs.delete(localJobFile, true);
      localJobFile = null;
    }
    // Drop split data held by each map tip.
    for (int i = 0; i < maps.length; i++) {
      maps[i].clearSplit();
    }
    // Delete the job's system directory asynchronously via the cleanup queue.
    Path tempDir = jobtracker.getSystemDirectoryForJob(getJobID());
    new CleanupQueue().addToQueue(new PathDeletionContext(FileSystem.get(conf), tempDir.toUri().getPath()));
  } catch (IOException e) {
    LOG.warn("Error cleaning up " + profile.getJobID() + ": " + e);
  }
  cleanUpMetrics();
  // Null out the caches so their memory can be reclaimed.
  this.nonRunningMapCache = null;
  this.runningMapCache = null;
  this.nonRunningReduces = null;
  this.runningReduces = null;
  this.trackerMapStats = null;
  this.trackerReduceStats = null;
}
The job is dead. We're now GC'ing it, getting rid of the job from all tables. Be sure to remove all of this job's tasks from the various tables.
33,389
/**
 * Finds the SUCCEEDED attempt status of the given map task, or null if
 * the map has not completed or no succeeded attempt is recorded.
 */
public synchronized TaskStatus findFinishedMap(int mapId) {
  TaskInProgress tip = maps[mapId];
  if (!tip.isComplete()) {
    return null;
  }
  for (TaskStatus status : tip.getTaskStatuses()) {
    if (status.getRunState() == TaskStatus.State.SUCCEEDED) {
      return status;
    }
  }
  return null;
}
Find the details of someplace where a map has finished
33,390
/**
 * Checks whether the maximum number of speculative tasks of the given
 * type are already executing, enforcing in order: a minimum absolute
 * allowance, a per-slot allowance, a cluster-wide total cap, and finally
 * the speculative-to-running ratio cap.
 */
private boolean atSpeculativeCap(TaskType type) {
  // Number of non-speculative running tasks of this type.
  float numTasks = (type == TaskType.MAP) ? (float) (runningMapTasks - speculativeMapTasks) : (float) (runningReduceTasks - speculativeReduceTasks);
  if (numTasks == 0) {
    return true;
  }
  int speculativeTaskCount = type == TaskType.MAP ? speculativeMapTasks : speculativeReduceTasks;
  int totalSpeculativeTaskCount = type == TaskType.MAP ? totalSpeculativeMapTasks.get() : totalSpeculativeReduceTasks.get();
  // Always allow a small minimum number of speculative tasks.
  if (speculativeTaskCount < MIN_SPEC_CAP) {
    return false;
  }
  ClusterStatus c = jobtracker.getClusterStatus(false);
  int numSlots = (type == TaskType.MAP ? c.getMaxMapTasks() : c.getMaxReduceTasks());
  if (speculativeTaskCount < numSlots * MIN_SLOTS_CAP) {
    return false;
  }
  // Cluster-wide cap across all jobs.
  if (totalSpeculativeTaskCount >= numSlots * TOTAL_SPECULATIVECAP) {
    return true;
  }
  boolean atCap = (((speculativeTaskCount) / numTasks) >= speculativeCap);
  if (LOG.isDebugEnabled()) {
    LOG.debug("SpeculativeCap is " + speculativeCap + ", specTasks/numTasks is " + ((speculativeTaskCount) / numTasks) + ", so atSpecCap() is returning " + atCap);
  }
  return atCap;
}
Check to see if the maximum number of speculative tasks are already being executed currently .
33,391
/**
 * Compares this tasktracker's mean task statistic against the job-wide
 * mean (for maps and for reduces) to decide whether the tracker is too
 * slow to receive speculative attempts; slowNodeThreshold is the number
 * of standard deviations that marks a tracker as slow.
 *
 * NOTE(review): "slow" here means the tracker's mean EXCEEDS the global
 * mean by threshold*std — consistent only if the statistic measures
 * something like time per task rather than a progress rate; confirm
 * against the stats' definition.
 */
protected boolean isSlowTracker(String taskTracker) {
  // Slow on maps?
  if (trackerMapStats.get(taskTracker) != null && trackerMapStats.get(taskTracker).mean() - mapTaskStats.mean() > mapTaskStats.std() * slowNodeThreshold) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Tracker " + taskTracker + " declared slow. trackerMapStats.get(taskTracker).mean() :" + trackerMapStats.get(taskTracker).mean() + " mapTaskStats :" + mapTaskStats);
    }
    return true;
  }
  // Slow on reduces?
  if (trackerReduceStats.get(taskTracker) != null && trackerReduceStats.get(taskTracker).mean() - reduceTaskStats.mean() > reduceTaskStats.std() * slowNodeThreshold) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Tracker " + taskTracker + " declared slow. trackerReduceStats.get(taskTracker).mean() :" + trackerReduceStats.get(taskTracker).mean() + " reduceTaskStats :" + reduceTaskStats);
    }
    return true;
  }
  return false;
}
Compares the average progress rate of tasks that have finished on this taskTracker to the average of all successful tasks thus far, to see if this TT is too slow for speculating. slowNodeThreshold is used to determine the number of standard deviations.
33,392
/**
 * Refreshes speculative-task candidates and running-task counters; must
 * be called periodically to keep these values fresh.
 *
 * @param now current time in milliseconds
 */
void refresh(long now) {
  refreshCandidateSpeculativeMaps(now);
  refreshCandidateSpeculativeReduces(now);
  refreshTaskCountsAndWaitTime(TaskType.MAP, now);
  refreshTaskCountsAndWaitTime(TaskType.REDUCE, now);
}
Refresh speculative task candidates and running tasks . This needs to be called periodically to obtain fresh values .
33,393
/**
 * Refreshes the running/needed/pending task counters and the total wait
 * time for the given task type.
 *
 * @param now current time in ms; used as the wait endpoint for tips that
 *            have not started executing yet
 */
protected void refreshTaskCountsAndWaitTime(TaskType type, long now) {
  TaskInProgress[] allTips = getTasks(type);
  int finishedTips = 0;
  int runningTips = 0;
  int runningTaskAttempts = 0;
  long totalWaitTime = 0;
  long jobStartTime = this.getStartTime();
  for (TaskInProgress tip : allTips) {
    if (tip.isComplete()) {
      finishedTips += 1;
    } else if (tip.isRunning()) {
      runningTaskAttempts += tip.getActiveTasks().size();
      runningTips += 1;
    }
    // Wait time is measured from job start to exec start, or to 'now'
    // for tips that have not started executing yet.
    if (tip.getExecStartTime() > 0) {
      totalWaitTime += tip.getExecStartTime() - jobStartTime;
    } else {
      totalWaitTime += now - jobStartTime;
    }
  }
  if (TaskType.MAP == type) {
    totalMapWaitTime = totalWaitTime;
    runningMapTasks = runningTaskAttempts;
    neededMapTasks = numMapTasks - runningTips - finishedTips + neededSpeculativeMaps();
    pendingMapTasks = numMapTasks - runningTaskAttempts - failedMapTIPs - finishedMapTasks + speculativeMapTasks;
  } else {
    totalReduceWaitTime = totalWaitTime;
    runningReduceTasks = runningTaskAttempts;
    neededReduceTasks = numReduceTasks - runningTips - finishedTips + neededSpeculativeReduces();
    pendingReduceTasks = numReduceTasks - runningTaskAttempts - failedReduceTIPs - finishedReduceTasks + speculativeReduceTasks;
  }
}
Refresh runningTasks neededTasks and pendingTasks counters
33,394
/**
 * Generates an XML-formatted block summarizing the state of the
 * JobTracker: task counts, capacities, node count, and average tasks per
 * node ("-" when there are no tasktrackers).
 */
public void generateSummaryTable(JspWriter out, JobTracker tracker) throws IOException {
  ClusterStatus status = tracker.getClusterStatus();
  int maxMapTasks = status.getMaxMapTasks();
  int maxReduceTasks = status.getMaxReduceTasks();
  int numTaskTrackers = status.getTaskTrackers();
  String tasksPerNodeStr;
  if (numTaskTrackers > 0) {
    double tasksPerNodePct = (double) (maxMapTasks + maxReduceTasks) / (double) numTaskTrackers;
    tasksPerNodeStr = percentFormat.format(tasksPerNodePct);
  } else {
    // Avoid division by zero when no trackers have joined yet.
    tasksPerNodeStr = "-";
  }
  out.print("<maps>" + status.getMapTasks() + "</maps>\n" + "<reduces>" + status.getReduceTasks() + "</reduces>\n" + "<total_submissions>" + tracker.getTotalSubmissions() + "</total_submissions>\n" + "<nodes>" + status.getTaskTrackers() + "</nodes>\n" + "<map_task_capacity>" + status.getMaxMapTasks() + "</map_task_capacity>\n" + "<reduce_task_capacity>" + status.getMaxReduceTasks() + "</reduce_task_capacity>\n" + "<avg_tasks_per_node>" + tasksPerNodeStr + "</avg_tasks_per_node>\n");
}
Generates an XML - formatted block that summarizes the state of the JobTracker .
33,395
/**
 * Appends this attempt id's unique string form (taskId, separator,
 * attempt number) to the given builder.
 */
protected StringBuilder appendTo(StringBuilder builder) {
  StringBuilder sb = taskId.appendTo(builder);
  sb.append(SEPARATOR);
  return sb.append(id);
}
Add the unique string to the StringBuilder
33,396
/**
 * Compares TaskAttemptIDs first by their task id, then by attempt number.
 * Only the sign of the result is meaningful.
 */
public int compareTo(ID o) {
  TaskAttemptID that = (TaskAttemptID) o;
  int tipComp = this.taskId.compareTo(that.taskId);
  if (tipComp != 0) {
    return tipComp;
  }
  // Compare without subtraction: this.id - that.id can overflow int for
  // extreme values, producing the wrong sign.
  if (this.id < that.id) {
    return -1;
  }
  return this.id == that.id ? 0 : 1;
}
Compare TaskIds by first tipIds then by task numbers .
33,397
/**
 * Distributes the total share among the schedulables based on the
 * comparator: FIFO/DEADLINE use the sorted greedy distribution, FAIR the
 * fair-share distribution, PRIORITY the priority distribution.
 *
 * @throws IllegalArgumentException for an unknown comparator
 */
public static void distributeShare(double total, final Collection<? extends Schedulable> schedulables, ScheduleComparator comparator) {
  switch (comparator) {
  case FIFO:
  // FIFO and DEADLINE share the greedy fully-satisfy-in-order strategy.
  case DEADLINE:
    Schedulable.distributeShareSorted(total, schedulables, comparator);
    break;
  case FAIR:
    Schedulable.distributeShareFair(total, schedulables);
    break;
  case PRIORITY:
    Schedulable.distributeSharePriority(total, schedulables);
    break;
  default:
    throw new IllegalArgumentException("Unknown comparator");
  }
}
Distribute the shares among the schedulables based on the comparator
33,398
/**
 * Distributes the share greedily: schedulables are sorted by the
 * comparator and each is fully satisfied (up to min(requested, maximum))
 * before the next receives anything; the one at which 'total' runs out
 * gets whatever remains.
 *
 * NOTE(review): schedulables after the one that exhausts 'total' keep
 * their previous share values untouched — presumably they default to 0;
 * confirm.
 */
private static void distributeShareSorted(double total, final Collection<? extends Schedulable> schedulables, ScheduleComparator comparator) {
  List<Schedulable> sches = new ArrayList<Schedulable>(schedulables);
  Collections.sort(sches, comparator);
  for (Schedulable schedulable : sches) {
    int max = Math.min(schedulable.getRequested(), schedulable.getMaximum());
    if (total > max) {
      schedulable.share = max;
      total -= max;
    } else {
      schedulable.share = total;
      return;
    }
  }
}
Distribute the share among Schedulables in a greedy manner when they are sorted based on some comparator and the first Schedulable has to be fully satisfied before the next one can get any resources
33,399
/**
 * Distributes the total share according to the FAIR model: searches for
 * a ratio at which the summed per-schedulable shares equal the total,
 * then assigns each schedulable its share at that ratio.
 */
private static void distributeShareFair(double total, final Collection<? extends Schedulable> schedulables) {
  BinarySearcher searcher = new BinarySearcher() {
    protected double targetFunction(double x) {
      return totalShareWithRatio(schedulables, x);
    }
  };
  double ratio = searcher.getSolution(total);
  for (Schedulable schedulable : schedulables) {
    schedulable.share = shareWithRatio(schedulable, ratio);
  }
}
Distribute the total share among the list of schedulables according to the FAIR model . Finds a way to distribute the share in such a way that all the min and max reservations of the schedulables are satisfied