idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
33,100
void internalReleaseLease ( Lease lease , String src , INodeFileUnderConstruction pendingFile ) throws IOException { if ( lease . hasPath ( ) ) { String [ ] leasePaths = new String [ lease . getPaths ( ) . size ( ) ] ; lease . getPaths ( ) . toArray ( leasePaths ) ; LOG . info ( "Recovering lease: " + lease + " for pat...
This is invoked when a lease expires . On lease expiry all the files that were written from that dfsclient should be recovered .
33,101
private boolean discardDone ( INodeFileUnderConstruction pendingFile , String src ) throws IOException { Block [ ] blocks = pendingFile . getBlocks ( ) ; if ( blocks == null || blocks . length == 0 ) { return false ; } Block last = blocks [ blocks . length - 1 ] ; if ( last . getNumBytes ( ) == 0 ) { dir . removeBlock ...
the last block completely .
33,102
void updatePipeline ( String clientName , Block oldBlock , Block newBlock , List < DatanodeID > newNodes ) throws IOException { LOG . info ( "updatePipeline(block=" + oldBlock + ", newGenerationStamp=" + newBlock . getGenerationStamp ( ) + ", newLength=" + newBlock . getNumBytes ( ) + ", newNodes=" + newNodes + ")" ) ;...
Update a pipeline for a block under construction
33,103
private String newStorageID ( ) { String newID = null ; while ( newID == null ) { newID = "DS" + Integer . toString ( r . nextInt ( ) ) ; if ( datanodeMap . get ( newID ) != null ) { newID = null ; } } return newID ; }
Generate new storage ID .
33,104
public int computeDatanodeWork ( ) throws IOException { int workFound = 0 ; int blocksToProcess = 0 ; int nodesToProcess = 0 ; if ( isInSafeMode ( ) ) { updateReplicationCounts ( workFound ) ; return workFound ; } synchronized ( heartbeats ) { blocksToProcess = ( int ) ( heartbeats . size ( ) * ReplicationConfigKeys . ...
Compute block replication and block invalidation work that can be scheduled on data - nodes . The datanode will be informed of this work at the next heartbeat .
33,105
int computeInvalidateWork ( int nodesToProcess ) { int numOfNodes = 0 ; ArrayList < String > keyArray = null ; readLock ( ) ; try { numOfNodes = recentInvalidateSets . size ( ) ; keyArray = new ArrayList < String > ( recentInvalidateSets . keySet ( ) ) ; } finally { readUnlock ( ) ; } nodesToProcess = Math . min ( numO...
Schedule blocks for deletion at datanodes
33,106
private int getQuotaForThisPriority ( int totalQuota , int blocksForThisPriority , int blocksForLowerPriorities ) { int quotaForLowerPriorities = Math . min ( totalQuota / 5 , blocksForLowerPriorities ) ; return Math . min ( blocksForThisPriority , totalQuota - quotaForLowerPriorities ) ; }
Decide the number of blocks to replicate on for this priority . The heuristic is that allocate at most 20% the quota for lower priority blocks
33,107
List < List < BlockInfo > > chooseUnderReplicatedBlocks ( int blocksToProcess ) { List < List < BlockInfo > > blocksToReplicate = new ArrayList < List < BlockInfo > > ( UnderReplicatedBlocks . LEVEL ) ; for ( int i = 0 ; i < UnderReplicatedBlocks . LEVEL ; i ++ ) { blocksToReplicate . add ( new ArrayList < BlockInfo > ...
Get a list of block lists to be replicated The index of block lists represents the
33,108
private void updateReplicationMetrics ( List < ReplicationWork > work ) { for ( ReplicationWork rw : work ) { DatanodeDescriptor [ ] targets = rw . targets ; if ( targets == null ) continue ; for ( DatanodeDescriptor target : targets ) { if ( clusterMap . isOnSameRack ( rw . srcNode , target ) ) { myFSMetrics . numLoca...
Update replication metrics
33,109
private DatanodeDescriptor [ ] chooseTarget ( ReplicationWork work ) { if ( ! neededReplications . contains ( work . block ) ) { return null ; } if ( work . blockSize == BlockFlags . NO_ACK ) { LOG . warn ( "Block " + work . block . getBlockId ( ) + " of the file " + getFullPathName ( work . fileINode ) + " is invalida...
Wrapper function for choosing targets for replication .
33,110
private boolean isGoodReplica ( DatanodeDescriptor node , Block block ) { Collection < Block > excessBlocks = excessReplicateMap . get ( node . getStorageID ( ) ) ; Collection < DatanodeDescriptor > nodesCorrupt = corruptReplicas . getNodes ( block ) ; return ( nodesCorrupt == null || ! nodesCorrupt . contains ( node )...
Decide if a replica is valid
33,111
void processPendingReplications ( ) { BlockInfo [ ] timedOutItems = pendingReplications . getTimedOutBlocks ( ) ; if ( timedOutItems != null ) { writeLock ( ) ; try { for ( int i = 0 ; i < timedOutItems . length ; i ++ ) { NumberReplicas num = countNodes ( timedOutItems [ i ] ) ; neededReplications . add ( timedOutItem...
If there were any replication requests that timed out reap them and put them back into the neededReplication queue
33,112
void clearReplicationQueues ( ) { writeLock ( ) ; try { synchronized ( neededReplications ) { neededReplications . clear ( ) ; } underReplicatedBlocksCount = 0 ; corruptReplicas . clear ( ) ; corruptReplicaBlocksCount = 0 ; overReplicatedBlocks . clear ( ) ; raidEncodingTasks . clear ( ) ; excessReplicateMap = new Hash...
Clear replication queues . This is used by standby avatar to reclaim memory .
33,113
void wipeDatanode ( DatanodeID nodeID ) throws IOException { String key = nodeID . getStorageID ( ) ; host2DataNodeMap . remove ( datanodeMap . remove ( key ) ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "BLOCK* NameSystem.wipeDatanode: " + nodeID . getName ( ) + " sto...
Physically remove node from datanodeMap .
33,114
void heartbeatCheck ( ) { if ( ! getNameNode ( ) . shouldCheckHeartbeat ( ) ) { return ; } boolean allAlive = false ; while ( ! allAlive ) { boolean foundDead = false ; DatanodeID nodeID = null ; synchronized ( heartbeats ) { for ( Iterator < DatanodeDescriptor > it = heartbeats . iterator ( ) ; it . hasNext ( ) ; ) { ...
Check if there are any expired heartbeats and if so whether any blocks have to be re - replicated . While removing dead datanodes make sure that only one datanode is marked dead at a time within the synchronized section . Otherwise a cascading effect causes more datanodes to be declared dead .
33,115
private boolean checkBlockSize ( Block block , INodeFile inode ) { if ( block . getNumBytes ( ) < 0 ) { return false ; } BlockInfo [ ] blocks = inode . getBlocks ( ) ; if ( blocks . length == 0 ) { return false ; } return block . getNumBytes ( ) <= inode . getPreferredBlockSize ( ) ; }
Return true if the block size number is valid
33,116
private void rejectAddStoredBlock ( Block block , DatanodeDescriptor node , String msg , boolean ignoreInfoLogs , final boolean parallelInitialBlockReport ) { if ( ( ! isInSafeModeInternal ( ) ) && ( ! ignoreInfoLogs ) ) { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.addStoredBlock: " + "addStoredBlock request...
Log a rejection of an addStoredBlock RPC invalidate the reported block .
33,117
private void processOverReplicatedBlocksAsync ( ) { if ( isInSafeMode ( ) ) { return ; } if ( delayOverreplicationMonitorTime > now ( ) ) { LOG . info ( "Overreplication monitor delayed for " + ( ( delayOverreplicationMonitorTime - now ( ) ) / 1000 ) + " seconds" ) ; return ; } nameNode . clearOutstandingNodes ( ) ; fi...
This is called from the ReplicationMonitor to process over replicated blocks .
33,118
private void updateNeededReplicationQueue ( BlockInfo blockInfo , int delta , int numCurrentReplicas , int numCurrentDecommissionedReplicas , DatanodeDescriptor node , short fileReplication ) { int numOldReplicas = numCurrentReplicas ; int numOldDecommissionedReplicas = numCurrentDecommissionedReplicas ; if ( node . is...
Update a block s priority queue in neededReplicaiton queues
33,119
private boolean blockReceived ( Block block , String delHint , DatanodeDescriptor node ) throws IOException { assert ( hasWriteLock ( ) ) ; node . decBlocksScheduled ( ) ; DatanodeDescriptor delHintNode = null ; if ( delHint != null && delHint . length ( ) != 0 ) { delHintNode = datanodeMap . get ( delHint ) ; if ( del...
The given node is reporting that it received a certain block .
33,120
DatanodeInfo [ ] getDatanodes ( DatanodeReportType type ) { ArrayList < DatanodeDescriptor > results = getDatanodeListForReport ( type ) ; DatanodeInfo [ ] arr = new DatanodeInfo [ results . size ( ) ] ; for ( int i = 0 ; i < arr . length ; i ++ ) { arr [ i ] = new DatanodeInfo ( results . get ( i ) ) ; } return arr ; ...
Get all the datanodes of the given type
33,121
void saveNamespace ( boolean force , boolean uncompressed ) throws AccessControlException , IOException { LOG . info ( "Saving namespace" ) ; writeLock ( ) ; try { checkSuperuserPrivilege ( ) ; if ( ! force && ! isInSafeMode ( ) ) { throw new IOException ( "Safe mode should be turned ON " + "in order to create namespac...
Save namespace image . This will save current namespace into fsimage file and empty edits file . Requires superuser privilege and safe mode .
33,122
private void datanodeDump ( PrintWriter out ) { readLock ( ) ; try { synchronized ( datanodeMap ) { out . println ( "Metasave: Number of datanodes: " + datanodeMap . size ( ) ) ; for ( Iterator < DatanodeDescriptor > it = datanodeMap . values ( ) . iterator ( ) ; it . hasNext ( ) ; ) { DatanodeDescriptor node = it . ne...
Prints information about all datanodes .
33,123
void startDecommission ( DatanodeDescriptor node ) throws IOException { if ( ! node . isDecommissionInProgress ( ) && ! node . isDecommissioned ( ) ) { LOG . info ( "Start Decommissioning node " + node . getName ( ) + " with " + node . numBlocks ( ) + " blocks." ) ; synchronized ( heartbeats ) { updateStats ( node , fa...
Start decommissioning the specified datanode .
33,124
void stopDecommission ( DatanodeDescriptor node ) throws IOException { if ( ( node . isDecommissionInProgress ( ) && ( ( Monitor ) dnthread . getRunnable ( ) ) . stopDecommission ( node ) ) || node . isDecommissioned ( ) ) { LOG . info ( "Stop Decommissioning node " + node . getName ( ) ) ; synchronized ( heartbeats ) ...
Stop decommissioning the specified datanodes .
33,125
private int countLiveNodes ( Block b , Iterator < DatanodeDescriptor > nodeIter ) { int live = 0 ; Collection < DatanodeDescriptor > nodesCorrupt = null ; if ( corruptReplicas . size ( ) != 0 ) { nodesCorrupt = corruptReplicas . getNodes ( b ) ; } while ( nodeIter . hasNext ( ) ) { DatanodeDescriptor node = nodeIter . ...
Counts the number of live nodes in the given list
33,126
BlockInfo isReplicationInProgress ( final DecommissioningStatus status , final DatanodeDescriptor srcNode , final BlockInfo block , boolean addToNeeded ) { INode fileINode = blocksMap . getINode ( block ) ; if ( fileINode == null ) { return null ; } NumberReplicas num = countNodes ( block ) ; int curReplicas = num . li...
Check if a block on srcNode has reached its replication factor or not
33,127
private String getHostNameForIp ( String ipAddr ) { try { InetAddress addr = InetAddress . getByName ( ipAddr ) ; return addr . getHostName ( ) ; } catch ( Exception e ) { } return null ; }
Best effort reverse DNS resolution . Returns null on error .
33,128
private boolean verifyNodeRegistration ( DatanodeRegistration nodeReg , String ipAddr ) throws IOException { assert ( hasWriteLock ( ) ) ; return inHostsList ( nodeReg , ipAddr ) ; }
Checks if the node is not on the hosts list . If it is not then it will be disallowed from registering .
33,129
private void checkDecommissioning ( DatanodeDescriptor nodeReg , String ipAddr ) throws IOException { if ( inExcludedHostsList ( nodeReg , ipAddr ) ) { startDecommission ( nodeReg ) ; } }
Decommission the node if it is in exclude list .
33,130
public DatanodeDescriptor getDatanode ( DatanodeID nodeID ) throws IOException { UnregisteredDatanodeException e = null ; DatanodeDescriptor node = datanodeMap . get ( nodeID . getStorageID ( ) ) ; if ( node == null ) { return null ; } if ( ! node . getName ( ) . equals ( nodeID . getName ( ) ) ) { e = new Unregistered...
Get data node by storage ID .
33,131
void incrementSafeBlockCount ( int replication , boolean skipCheck ) { if ( safeMode != null && safeMode . isOn ( ) ) { if ( ( int ) replication == minReplication ) { this . blocksSafe ++ ; if ( ! skipCheck ) { safeMode . checkMode ( ) ; } } } }
Increment number of blocks that reached minimal replication .
33,132
void enterSafeMode ( ) throws IOException { writeLock ( ) ; try { getEditLog ( ) . logSyncAll ( ) ; if ( ! isInSafeMode ( ) ) { safeMode = SafeModeUtil . getInstance ( this ) ; safeMode . setManual ( ) ; return ; } safeMode . setManual ( ) ; getEditLog ( ) . logSyncAll ( ) ; NameNode . stateChangeLog . info ( "STATE* S...
Enter safe mode manually .
33,133
void leaveSafeMode ( boolean checkForUpgrades ) throws SafeModeException { writeLock ( ) ; try { if ( ! isInSafeMode ( ) ) { NameNode . stateChangeLog . info ( "STATE* Safe mode is already OFF." ) ; return ; } if ( getDistributedUpgradeState ( ) ) { throw new SafeModeException ( "Distributed upgrade is in progress" , s...
Leave safe mode .
33,134
void initializeReplQueues ( ) throws SafeModeException { writeLock ( ) ; try { if ( isPopulatingReplQueues ( ) ) { NameNode . stateChangeLog . info ( "STATE* Safe mode is already OFF." + " Replication queues are initialized" ) ; return ; } safeMode . initializeReplicationQueues ( ) ; } finally { writeUnlock ( ) ; } }
Manually initialize replication queues when in safemode .
33,135
public long nextGenerationStampForBlock ( Block block , boolean fromNN ) throws IOException { writeLock ( ) ; try { if ( isInSafeMode ( ) ) { throw new SafeModeException ( "Cannot get nextGenStamp for " + block , safeMode ) ; } Block blockWithWildcardGenstamp = new Block ( block . getBlockId ( ) ) ; BlockInfo storedBlo...
Verifies that the block is associated with a file that has a lease . Increments logs and then returns the stamp
33,136
void saveFilesUnderConstruction ( SaveNamespaceContext ctx , DataOutputStream out ) throws IOException { synchronized ( leaseManager ) { int pathsToSave = 0 ; Iterator < Lease > itrl = leaseManager . getSortedLeases ( ) . iterator ( ) ; while ( itrl . hasNext ( ) ) { Lease lease = itrl . next ( ) ; for ( String path : ...
Serializes leases .
33,137
public String getLiveNodes ( ) { final Map < String , Map < String , Object > > info = new HashMap < String , Map < String , Object > > ( ) ; try { final ArrayList < DatanodeDescriptor > liveNodeList = new ArrayList < DatanodeDescriptor > ( ) ; final ArrayList < DatanodeDescriptor > deadNodeList = new ArrayList < Datan...
Returned information is a JSON representation of map with host name as the key and value is a map of live node attribute keys to its values
33,138
public String getDecomNodes ( ) { final Map < String , Map < String , Object > > info = new HashMap < String , Map < String , Object > > ( ) ; try { final ArrayList < DatanodeDescriptor > decomNodeList = this . getDecommissioningNodesList ( ) ; for ( DatanodeDescriptor node : decomNodeList ) { final Map < String , Obje...
Returned information is a JSON representation of map with host name as the key and value is a map of decomisioning node attribute keys to its values
33,139
public short adjustReplication ( short replication ) { short r = ( short ) ( replication < minReplication ? minReplication : replication > maxReplication ? maxReplication : replication ) ; return r ; }
Clamp the specified replication between the minimum and the maximum replication levels .
33,140
public static URI translateToOldSchema ( Configuration clusterConf , String nameserviceId ) { String key = FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + "." + nameserviceId ; String value = clusterConf . get ( key ) ; if ( value == null ) { throw new IllegalArgumentException ( "Cannot translate to old schema for nameser...
Translates nameserviceId to ZK key in deprecated layout
33,141
public static SocketFactory getSocketFactoryFromProperty ( Configuration conf , String propValue ) { try { Class < ? > theClass = conf . getClassByName ( propValue ) ; return ( SocketFactory ) ReflectionUtils . newInstance ( theClass , conf ) ; } catch ( ClassNotFoundException cnfe ) { throw new RuntimeException ( "Soc...
Get the socket factory corresponding to the given proxy URI . If the given proxy URI corresponds to an absence of configuration parameter returns null . If the URI is malformed raises an exception .
33,142
public static String getServerAddress ( Configuration conf , String oldBindAddressName , String oldPortName , String newBindAddressName ) { String oldAddr = conf . get ( oldBindAddressName ) ; int oldPort = conf . getInt ( oldPortName , 0 ) ; String newAddrPort = conf . get ( newBindAddressName ) ; if ( oldAddr == null...
Handle the transition from pairs of attributes specifying a host and port to a single colon separated one .
33,143
public static List < String > normalizeHostNames ( Collection < String > names ) { List < String > resolvedIpAddresses = new ArrayList < String > ( names . size ( ) ) ; for ( String name : names ) { resolvedIpAddresses . add ( normalizeHostName ( name ) ) ; } return resolvedIpAddresses ; }
Given a collection of string representation of hosts return a list of corresponding IP addresses in the textual representation .
33,144
public static void isSocketBindable ( InetSocketAddress addr ) throws IOException { if ( addr == null ) { return ; } ServerSocket socket = new ServerSocket ( ) ; try { socket . bind ( addr ) ; } finally { socket . close ( ) ; } }
Tries to bind to the given address . Throws an exception on failure . Used to fail earlier and verify configuration values .
33,145
public boolean addSourceFile ( FileSystem fs , PolicyInfo info , FileStatus src , RaidState . Checker checker , long now , int targetReplication ) throws IOException { List < FileStatus > lfs = RaidNode . listDirectoryRaidFileStatus ( fs . getConf ( ) , fs , src . getPath ( ) ) ; if ( lfs == null ) { return false ; } R...
Collect the statistics of a source directory . Return true if the file should be raided but not .
33,146
private void incrementAttemptUnprotected ( ) { attempt ++ ; currentAttemptId = new TaskAttemptID ( new TaskID ( attemptJobId , currentAttemptId . isMap ( ) , currentAttemptId . getTaskID ( ) . getId ( ) ) , attempt ) ; }
Increment the attempt number for launching a remote corona job tracker . Must be called only when holding the object lock .
33,147
private void checkAttempt ( TaskAttemptID attemptId ) throws IOException { if ( ! attemptId . equals ( currentAttemptId ) ) { throw new IOException ( "Attempt " + attemptId + " does not match current attempt " + currentAttemptId ) ; } }
Checks whether provided attempt id of remote JT matches currently set throws if not
33,148
void initializeClientUnprotected ( String host , int port , String sessionId ) throws IOException { if ( client != null ) { return ; } LOG . info ( "Creating JT client to " + host + ":" + port ) ; long connectTimeout = RemoteJTProxy . getRemotJTTimeout ( conf ) ; int rpcTimeout = RemoteJTProxy . getRemoteJTRPCTimeout (...
Create the RPC client to the remote corona job tracker .
33,149
public void waitForJTStart ( JobConf jobConf ) throws IOException { int maxJTAttempts = jobConf . getInt ( "mapred.coronajobtracker.remotejobtracker.attempts" , 4 ) ; ResourceTracker resourceTracker = jt . getResourceTracker ( ) ; SessionDriver sessionDriver = jt . getSessionDriver ( ) ; List < ResourceGrant > excludeG...
Waits for the remote Corona JT to be ready . This involves - getting a JOBTRACKER resource from the cluster manager . - starting the remote job tracker by connecting to the corona task tracker on the machine . - waiting for the remote job tracker to report its port back to this process .
33,150
private ResourceGrant waitForJTGrant ( ResourceTracker resourceTracker , SessionDriver sessionDriver , List < ResourceGrant > previousGrants ) throws IOException , InterruptedException { LOG . info ( "Waiting for JT grant for " + attemptJobId ) ; ResourceRequest req = resourceTracker . newJobTrackerRequest ( ) ; for ( ...
Wait for a JOBTRACKER grant .
33,151
private boolean startRemoteJT ( JobConf jobConf , ResourceGrant grant ) throws InterruptedException { org . apache . hadoop . corona . InetAddress ttAddr = Utilities . appInfoToAddress ( grant . appInfo ) ; CoronaTaskTrackerProtocol coronaTT = null ; try { coronaTT = jt . getTaskTrackerClient ( ttAddr . getHost ( ) , t...
Start corona job tracker on the machine provided by using the corona task tracker API .
33,152
public void close ( ) { clientLock . writeLock ( ) . lock ( ) ; try { if ( client != null ) { RPC . stopProxy ( client ) ; client = null ; } } finally { clientLock . writeLock ( ) . unlock ( ) ; } }
Stop RPC client .
33,153
private JobSubmissionProtocol checkClient ( ) throws IOException { synchronized ( this ) { while ( client == null ) { try { if ( remoteJTStatus == RemoteJTStatus . FAILURE ) { throw new IOException ( "Remote Job Tracker is not available" ) ; } this . wait ( 1000 ) ; } catch ( InterruptedException e ) { throw new IOExce...
Check if the RPC client to the remote job tracker is ready and wait if not .
33,154
boolean isProcessTreeOverLimit ( String tId , long currentMemUsage , long curMemUsageOfAgedProcesses , long limit ) { boolean isOverLimit = false ; if ( currentMemUsage > ( 2 * limit ) ) { LOG . warn ( "Process tree for task: " + tId + " running over twice " + "the configured limit. Limit=" + limit + ", current usage =...
Check whether a task s process tree s current memory usage is over limit .
33,155
boolean isProcessTreeOverLimit ( ProcfsBasedProcessTree pTree , String tId , long limit ) { long currentMemUsage = pTree . getCumulativeVmem ( ) ; long curMemUsageOfAgedProcesses = pTree . getCumulativeVmem ( 1 ) ; return isProcessTreeOverLimit ( tId , currentMemUsage , curMemUsageOfAgedProcesses , limit ) ; }
method provided just for easy testing purposes
33,156
private long getTaskCumulativeRssmem ( TaskAttemptID tid ) { ProcessTreeInfo ptInfo = processTreeInfoMap . get ( tid ) ; ProcfsBasedProcessTree pTree = ptInfo . getProcessTree ( ) ; return pTree == null ? 0 : pTree . getCumulativeVmem ( ) ; }
Return the cumulative rss memory used by a task
33,157
private void failTasksWithMaxRssMemory ( long rssMemoryInUsage , long availableRssMemory ) { List < TaskAttemptID > tasksToKill = new ArrayList < TaskAttemptID > ( ) ; List < TaskAttemptID > allTasks = new ArrayList < TaskAttemptID > ( ) ; allTasks . addAll ( processTreeInfoMap . keySet ( ) ) ; Collections . sort ( all...
Starting from the tasks use the highest amount of RSS memory fail the tasks until the RSS memory meets the requirement
33,158
public static byte [ ] prepareCachedNameBytes ( String entityName ) { UTF8 name = new UTF8 ( ) ; name . set ( entityName , true ) ; byte nameBytes [ ] = name . getBytes ( ) ; byte cachedName [ ] = new byte [ nameBytes . length + 2 ] ; System . arraycopy ( nameBytes , 0 , cachedName , 2 , nameBytes . length ) ; int v = ...
Prepares a byte array for given name together with lenght as the two trailing bytes .
33,159
private static Class < ? > getClassWithCaching ( String className , Configuration conf ) { Class < ? > classs = cachedClassObjects . get ( className ) ; if ( classs == null ) { try { classs = conf . getClassByName ( className ) ; if ( cachedClassObjects . size ( ) < CACHE_MAX_SIZE ) { cachedClassObjects . put ( classNa...
Retrieve Class for given name from cache . if not present cache it it the map capacity is not exceeded .
33,160
private boolean blocksEquals ( Block [ ] a1 , Block [ ] a2 , boolean closedFile ) { if ( a1 == a2 ) return true ; if ( a1 == null || a2 == null || a2 . length != a1 . length ) return false ; for ( int i = 0 ; i < a1 . length ; i ++ ) { Block b1 = a1 [ i ] ; Block b2 = a2 [ i ] ; if ( b1 == b2 ) continue ; if ( b1 == nu...
Comapre two arrays of blocks . If the file is open do not compare sizes of the blocks .
33,161
void abort ( Throwable t ) throws IOException { LOG . info ( "Aborting because of " + StringUtils . stringifyException ( t ) ) ; try { downlink . abort ( ) ; downlink . flush ( ) ; } catch ( IOException e ) { } try { handler . waitForFinish ( ) ; } catch ( Throwable ignored ) { process . destroy ( ) ; } IOException wra...
Abort the application and wait for it to finish .
33,162
void cleanup ( ) throws IOException { serverSocket . close ( ) ; try { downlink . close ( ) ; } catch ( InterruptedException ie ) { Thread . currentThread ( ) . interrupt ( ) ; } }
Clean up the child procress and socket .
33,163
static Process runClient ( List < String > command , Map < String , String > env ) throws IOException { ProcessBuilder builder = new ProcessBuilder ( command ) ; if ( env != null ) { builder . environment ( ) . putAll ( env ) ; } Process result = builder . start ( ) ; return result ; }
Run a given command in a subprocess including threads to copy its stdout and stderr to our stdout and stderr .
33,164
synchronized void add ( Range range ) { if ( range . isEmpty ( ) ) { return ; } long startIndex = range . getStartIndex ( ) ; long endIndex = range . getEndIndex ( ) ; SortedSet < Range > headSet = ranges . headSet ( range ) ; if ( headSet . size ( ) > 0 ) { Range previousRange = headSet . last ( ) ; LOG . debug ( "pre...
Add the range indices . It is ensured that the added range doesn t overlap the existing ranges . If it overlaps the existing overlapping ranges are removed and a single range having the superset of all the removed ranges and this range is added . If the range is of 0 length doesn t do anything .
33,165
private JobStory getNextJobFiltered ( ) throws IOException { while ( true ) { ZombieJob job = producer . getNextJob ( ) ; if ( job == null ) { return null ; } if ( job . getOutcome ( ) == Pre21JobHistoryConstants . Values . KILLED ) { continue ; } if ( job . getNumberMaps ( ) == 0 ) { continue ; } if ( job . getNumLogg...
Filter some jobs being fed to the simulator . For now we filter out killed jobs to facilitate debugging .
33,166
public void addColumn ( ColumnName name , boolean primary ) { ColumnHeader < ColumnName > top = new ColumnHeader < ColumnName > ( name , 0 ) ; top . up = top ; top . down = top ; if ( primary ) { Node < ColumnName > tail = head . left ; tail . right = top ; top . left = tail ; top . right = head ; head . left = top ; }...
Add a column to the table
33,167
public void addRow ( boolean [ ] values ) { Node < ColumnName > prev = null ; for ( int i = 0 ; i < values . length ; ++ i ) { if ( values [ i ] ) { ColumnHeader < ColumnName > top = columns . get ( i ) ; top . size += 1 ; Node < ColumnName > bottom = top . up ; Node < ColumnName > node = new Node < ColumnName > ( null...
Add a row to the table .
33,168
private ColumnHeader < ColumnName > findBestColumn ( ) { int lowSize = Integer . MAX_VALUE ; ColumnHeader < ColumnName > result = null ; ColumnHeader < ColumnName > current = ( ColumnHeader < ColumnName > ) head . right ; while ( current != head ) { if ( current . size < lowSize ) { lowSize = current . size ; result = ...
Find the column with the fewest choices .
33,169
private void coverColumn ( ColumnHeader < ColumnName > col ) { LOG . debug ( "cover " + col . head . name ) ; col . right . left = col . left ; col . left . right = col . right ; Node < ColumnName > row = col . down ; while ( row != col ) { Node < ColumnName > node = row . right ; while ( node != row ) { node . down . ...
Hide a column in the table
33,170
private List < ColumnName > getRowName ( Node < ColumnName > row ) { List < ColumnName > result = new ArrayList < ColumnName > ( ) ; result . add ( row . head . name ) ; Node < ColumnName > node = row . right ; while ( node != row ) { result . add ( node . head . name ) ; node = node . right ; } return result ; }
Get the name of a row by getting the list of column names that it satisfies .
33,171
private int search ( List < Node < ColumnName > > partial , SolutionAcceptor < ColumnName > output ) { int results = 0 ; if ( head . right == head ) { List < List < ColumnName > > result = new ArrayList < List < ColumnName > > ( partial . size ( ) ) ; for ( Node < ColumnName > row : partial ) { result . add ( getRowNam...
Find a solution to the problem .
33,172
private void searchPrefixes ( int depth , int [ ] choices , List < int [ ] > prefixes ) { if ( depth == 0 ) { prefixes . add ( choices . clone ( ) ) ; } else { ColumnHeader < ColumnName > col = findBestColumn ( ) ; if ( col . size > 0 ) { coverColumn ( col ) ; Node < ColumnName > row = col . down ; int rowId = 0 ; whil...
Generate a list of prefixes down to a given depth . Assumes that the problem is always deeper than depth .
33,173
public List < int [ ] > split ( int depth ) { int [ ] choices = new int [ depth ] ; List < int [ ] > result = new ArrayList < int [ ] > ( 100000 ) ; searchPrefixes ( depth , choices , result ) ; return result ; }
Generate a list of row choices to cover the first moves .
33,174
private Node < ColumnName > advance ( int goalRow ) { ColumnHeader < ColumnName > col = findBestColumn ( ) ; if ( col . size > 0 ) { coverColumn ( col ) ; Node < ColumnName > row = col . down ; int id = 0 ; while ( row != col ) { if ( id == goalRow ) { Node < ColumnName > node = row . right ; while ( node != row ) { co...
Make one move from a prefix
33,175
private void rollback ( Node < ColumnName > row ) { Node < ColumnName > node = row . left ; while ( node != row ) { uncoverColumn ( node . head ) ; node = node . left ; } uncoverColumn ( row . head ) ; }
Undo a prefix exploration
33,176
public int solve ( int [ ] prefix , SolutionAcceptor < ColumnName > output ) { List < Node < ColumnName > > choices = new ArrayList < Node < ColumnName > > ( ) ; for ( int i = 0 ; i < prefix . length ; ++ i ) { choices . add ( advance ( prefix [ i ] ) ) ; } int result = search ( choices , output ) ; for ( int i = prefi...
Given a prefix find solutions under it .
33,177
public synchronized void waitFor ( int minResponses , int minSuccesses , int maxExceptions , int millis , String operationName ) throws InterruptedException , TimeoutException { long st = monotonicNow ( ) ; long nextLogTime = st + ( long ) ( millis * WAIT_PROGRESS_INFO_THRESHOLD ) ; long et = st + millis ; while ( true...
Wait for the quorum to achieve a certain number of responses .
33,178
private synchronized void checkAssertionErrors ( ) { boolean assertsEnabled = false ; assert assertsEnabled = true ; if ( assertsEnabled ) { for ( Throwable t : exceptions . values ( ) ) { if ( t instanceof AssertionError ) { throw ( AssertionError ) t ; } else if ( t instanceof RemoteException && ( ( RemoteException )...
Check if any of the responses came back with an AssertionError . If so it re - throws it even if there was a quorum of responses . This code only runs if assertions are enabled for this class otherwise it should JIT itself away .
33,179
public synchronized void throwQuorumException ( String msg ) throws QuorumException { Preconditions . checkState ( ! exceptions . isEmpty ( ) ) ; throw QuorumException . create ( msg , successes , exceptions ) ; }
Throws exception as a result of this quorum call .
33,180
public void add ( final GridmixJob job ) throws InterruptedException { final boolean addToQueue = ! shutdown ; if ( addToQueue ) { final SubmitTask task = new SubmitTask ( job ) ; sem . acquire ( ) ; try { sched . execute ( task ) ; } catch ( RejectedExecutionException e ) { sem . release ( ) ; } } }
Enqueue the job to be submitted per the deadline associated with it .
33,181
public void join ( long millis ) throws InterruptedException { if ( ! shutdown ) { throw new IllegalStateException ( "Cannot wait for active submit thread" ) ; } sched . awaitTermination ( millis , TimeUnit . MILLISECONDS ) ; }
Continue running until all queued jobs have been submitted to the cluster .
33,182
public void printAllValues ( ) throws Exception { err ( "List of all the available keys:" ) ; Object val = null ; for ( ObjectName oname : hadoopObjectNames ) { err ( ">>>>>>>>jmx name: " + oname . getCanonicalKeyPropertyListString ( ) ) ; MBeanInfo mbinfo = mbsc . getMBeanInfo ( oname ) ; MBeanAttributeInfo [ ] mbinfo...
Print all attribute values.
33,183
public String getValue ( String key ) throws Exception { Object val = null ; for ( ObjectName oname : hadoopObjectNames ) { try { val = mbsc . getAttribute ( oname , key ) ; } catch ( AttributeNotFoundException anfe ) { continue ; } catch ( ReflectionException re ) { if ( re . getCause ( ) instanceof NoSuchMethodExcept...
Get a single value by key.
33,184
/**
 * Compute the total logical size (sum of file lengths) of the given listing.
 *
 * @param lfs directory listing; may be null
 * @return the summed logical byte count, or 0 for a null listing
 */
public static long getDirLogicalSize(List<FileStatus> lfs) {
  if (lfs == null) {
    return 0L;
  }
  long sum = 0L;
  for (FileStatus status : lfs) {
    sum += status.getLen();
  }
  return sum;
}
Get the total logical size in the directory
33,185
/**
 * Compute the total physical size of the given listing: each file's length
 * weighted by its replication factor.
 *
 * @param lfs directory listing; may be null
 * @return the summed physical byte count, or 0 for a null listing
 */
public static long getDirPhysicalSize(List<FileStatus> lfs) {
  if (lfs == null) {
    return 0L;
  }
  long sum = 0L;
  for (FileStatus status : lfs) {
    // Physical footprint = logical length x number of replicas.
    sum += status.getLen() * status.getReplication();
  }
  return sum;
}
Get the total physical size in the directory
33,186
/**
 * Return the thread-local {@link BinaryRecordOutput}, rebound to the
 * supplied {@link DataOutput}.
 *
 * @param out destination for subsequently written records
 * @return this thread's cached record output, now targeting {@code out}
 */
public static BinaryRecordOutput get(DataOutput out) {
  final BinaryRecordOutput cached = (BinaryRecordOutput) bOut.get();
  cached.setDataOutput(out);
  return cached;
}
Get a thread - local record output for the supplied DataOutput .
33,187
/**
 * Test whether the given file name is a segments_N file, explicitly
 * excluding the segments.gen file (which shares the same prefix).
 *
 * @param name file name to examine
 * @return true iff the name denotes a segments_N file
 */
public static boolean isSegmentsFile(String name) {
  if (!name.startsWith(IndexFileNames.SEGMENTS)) {
    return false;
  }
  // segments.gen starts with the same prefix but is not a segments_N file.
  return !name.equals(IndexFileNames.SEGMENTS_GEN);
}
Check if the file is a segments_N file
33,188
public static long generationFromSegmentsFileName ( String fileName ) { if ( fileName . equals ( IndexFileNames . SEGMENTS ) ) { return 0 ; } else if ( fileName . startsWith ( IndexFileNames . SEGMENTS ) ) { return Long . parseLong ( fileName . substring ( 1 + IndexFileNames . SEGMENTS . length ( ) ) , Character . MAX_...
Parse the generation off the segments file name and return it .
33,189
/**
 * Factory hook for the interleaved input stream; subclasses may override
 * this to provide different behavior. Only called from the constructor, so
 * an override must not depend on subclass state set up after construction.
 *
 * @param in                  underlying raw input stream
 * @param metaDataBlockLength length of each metadata block
 * @param dataBlockLength     length of each data block
 * @param consumer            sink that receives decoded metadata
 * @return the stream that de-interleaves metadata and data blocks
 */
protected InterleavedInputStream createInterleavedInputStream ( InputStream in , int metaDataBlockLength , int dataBlockLength , SimpleSeekableFormat . MetaDataConsumer consumer ) { return new InterleavedInputStream ( in , metaDataBlockLength , dataBlockLength , consumer ) ; }
This factory method can be overridden by a subclass to provide different behavior. It is only called in the constructor.
33,190
private boolean moveToNextDataSegment ( ) throws IOException { try { clearDataSegment ( ) ; DataSegmentReader dataSegmentReader = new DataSegmentReader ( dataIn , conf , decompressorCache ) ; dataSegmentIn = dataSegmentReader . getInputStream ( ) ; } catch ( EmptyDataSegmentException e ) { return false ; } catch ( EOFE...
Returns false if there are no more data segments .
33,191
public long seekForward ( ) throws IOException { interleavedIn . skipToLastAvailableMetaDataBlock ( ) ; if ( ! interleavedIn . readMetaDataIfNeeded ( ) ) { throw new EOFException ( "Cannot get a complete metadata block" ) ; } SortedMap < Long , Long > offsetPairs = metaData . getOffsetPairs ( ) ; long uncompressedDataO...
This function seeks forward using all available bytes . It returns the offset after the seek .
33,192
public void refresh ( long position , long skippedUntilTxid ) throws IOException { checkInitialized ( ) ; if ( isInProgress ( ) ) { LedgerHandle ledger = ledgerProvider . openForReading ( ledgerId ) ; journalInputStream . resetLedger ( ledger ) ; } journalInputStream . position ( position ) ; bin = new BufferedInputStr...
Refresh preferably to a known position
33,193
public void blockLocationInfoExpiresIfNeeded ( ) { if ( blkLocInfoExpireTimeout < 0 ) { return ; } long timeNow = System . currentTimeMillis ( ) ; if ( timeBlkLocInfoExpire < timeNow ) { this . writeLock ( ) ; try { long newTimeBlockExpire = Long . MAX_VALUE ; List < LocatedBlock > listToRemove = new ArrayList < Locate...
Expire block location information that has lived for too long.
33,194
private void initBlkLocInfoExpireMap ( long expireTime ) { if ( blkLocInfoExpireTimeout < 0 ) { return ; } this . blkLocInfoExpireMap = new HashMap < LocatedBlock , Long > ( this . getLocatedBlocks ( ) . size ( ) ) ; for ( LocatedBlock lb : this . getLocatedBlocks ( ) ) { blkLocInfoExpireMap . put ( lb , expireTime ) ;...
Initialize the blkLocInfoExpireMap, which keeps track of the expiration time of all block location info.
33,195
public boolean isUnderConstructionBlock ( Block block ) { if ( ! isUnderConstruction ( ) ) { return false ; } LocatedBlock lastBlock = this . get ( this . locatedBlockCount ( ) - 1 ) ; if ( ( this . fileLength <= lastBlock . getStartOffset ( ) + lastBlock . getBlockSize ( ) ) && lastBlock . getBlock ( ) . equals ( bloc...
Determine whether the input block is the block under construction for the file. If the current file is not under construction, false is always returned.
33,196
/**
 * Look up a counter by its group and counter name.
 *
 * @param group name of the counter group
 * @param name  name of the counter within the group
 * @return the matching counter
 */
public synchronized Counter findCounter(String group, String name) {
  return getGroup(group).getCounterForName(name);
}
Find a counter given the group and the name .
33,197
/**
 * Find a counter using string group/name plus a numeric id.
 * NOTE(review): the {@code id} argument is ignored — the lookup is by name
 * only; presumably retained for backward compatibility. TODO confirm with
 * callers before removing or honoring {@code id}.
 *
 * @param group name of the counter group
 * @param id    ignored
 * @param name  name of the counter within the group
 * @return the matching counter
 */
public synchronized Counter findCounter ( String group , int id , String name ) { return getGroup ( group ) . getCounterForName ( name ) ; }
Find a counter by using strings
33,198
/**
 * Increment the named counter by the given amount, creating the counter
 * (via its group) if it does not already exist.
 *
 * @param group   name of the counter group
 * @param counter name of the counter within the group
 * @param amount  value to add to the counter
 */
public synchronized void incrCounter(String group, String counter, long amount) {
  Counter target = getGroup(group).getCounterForName(counter);
  target.increment(amount);
}
Increments the specified counter by the specified amount, creating it if it didn't already exist.
33,199
/**
 * Convenience method: return a new Counters instance holding the element-wise
 * sum of two sets of counters. Neither argument is modified.
 *
 * @param a first set of counters
 * @param b second set of counters
 * @return a fresh Counters containing {@code a + b}
 */
public static Counters sum(Counters a, Counters b) {
  final Counters total = new Counters();
  total.incrAllCounters(a);
  total.incrAllCounters(b);
  return total;
}
Convenience method for computing the sum of two sets of counters .