idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
33,200 | public void log ( Log log ) { log . info ( "Counters: " + size ( ) ) ; for ( Group group : this ) { log . info ( " " + group . getDisplayName ( ) ) ; for ( Counter counter : group ) { log . info ( " " + counter . getDisplayName ( ) + "=" + counter . getCounter ( ) ) ; } } } | Logs the current counter values . |
33,201 | public synchronized String makeCompactString ( ) { StringBuffer buffer = new StringBuffer ( ) ; boolean first = true ; for ( Group group : this ) { for ( Counter counter : group ) { if ( first ) { first = false ; } else { buffer . append ( ',' ) ; } buffer . append ( group . getDisplayName ( ) ) ; buffer . append ( '.'... | Convert a counters object into a single line that is easy to parse . |
33,202 | public synchronized String makeJsonString ( ) { Map < String , Map < String , Long > > data = new HashMap < String , Map < String , Long > > ( ) ; for ( Group group : this ) { Map < String , Long > groupData = new HashMap < String , Long > ( ) ; data . put ( group . getDisplayName ( ) , groupData ) ; for ( Counter coun... | Convert a counters object into a JSON string |
33,203 | public synchronized String makeEscapedCompactString ( ) { StringBuffer buffer = new StringBuffer ( ) ; for ( Group group : this ) { buffer . append ( group . makeEscapedCompactString ( ) ) ; } return buffer . toString ( ) ; } | Represent the counter in a textual format that can be converted back to its object form |
33,204 | private static String getBlock ( String str , char open , char close , IntWritable index ) throws ParseException { StringBuilder split = new StringBuilder ( ) ; int next = StringUtils . findNext ( str , open , StringUtils . ESCAPE_CHAR , index . get ( ) , split ) ; split . setLength ( 0 ) ; if ( next >= 0 ) { ++ next ;... | returns null . |
33,205 | public synchronized static String getUniqueFile ( TaskAttemptContext context , String name , String extension ) { TaskID taskId = context . getTaskAttemptID ( ) . getTaskID ( ) ; int partition = taskId . getId ( ) ; StringBuilder result = new StringBuilder ( ) ; result . append ( name ) ; result . append ( '-' ) ; resu... | Generate a unique filename based on the task id name and extension |
33,206 | public Path getDefaultWorkFile ( TaskAttemptContext context , String extension ) throws IOException { FileOutputCommitter committer = ( FileOutputCommitter ) getOutputCommitter ( context ) ; return new Path ( committer . getWorkPath ( ) , getUniqueFile ( context , "part" , extension ) ) ; } | Get the default path and filename for the output format . |
33,207 | public void reduce ( Text key , Iterator < Text > values , OutputCollector < Text , Text > output , Reporter reporter ) throws IOException { String keyStr = key . toString ( ) ; int pos = keyStr . indexOf ( ValueAggregatorDescriptor . TYPE_SEPARATOR ) ; String type = keyStr . substring ( 0 , pos ) ; ValueAggregator agg... | Combines values for a given key . |
33,208 | synchronized Long getMemReservedForTasks ( TaskTrackerStatus taskTracker , TaskType taskType ) { long vmem = 0 ; for ( TaskStatus task : taskTracker . getTaskReports ( ) ) { if ( ( task . getRunState ( ) == TaskStatus . State . RUNNING ) || ( task . getRunState ( ) == TaskStatus . State . UNASSIGNED ) || ( task . inTas... | Find the memory that is already used by all the running tasks residing on the given TaskTracker . |
33,209 | boolean matchesMemoryRequirements ( JobInProgress job , TaskType taskType , TaskTrackerStatus taskTracker ) { LOG . debug ( "Matching memory requirements of " + job . getJobID ( ) . toString ( ) + " for scheduling on " + taskTracker . trackerName ) ; if ( ! isSchedulingBasedOnMemEnabled ( ) ) { LOG . debug ( "Schedulin... | Check if a TT has enough memory to run a task of the specified type from this job . |
33,210 | public Path getLocalPathToRead ( String pathStr , Configuration conf ) throws IOException { AllocatorPerContext context = obtainContext ( contextCfgItemName ) ; return context . getLocalPathToRead ( pathStr , conf ) ; } | Get a path from the local FS for reading . We search through all the configured dirs for the file s existence and return the complete path to the file when we find one |
33,211 | public boolean ifExists ( String pathStr , Configuration conf ) { AllocatorPerContext context = obtainContext ( contextCfgItemName ) ; return context . ifExists ( pathStr , conf ) ; } | We search through all the configured dirs for the file s existence and return true when we find one . |
33,212 | private void waitForLastTxIdNode ( AvatarZooKeeperClient zk , Configuration conf ) throws Exception { String address = conf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; long maxWaitTime = this . getMaxWaitTimeForWaitTxid ( ) ; long start = System . currentTimeMillis ( ) ; while ( true ) { if ( System . currentTi... | Waits till the last txid node appears in Zookeeper such that it matches the ssid node . |
33,213 | public int setAvatar ( String role , boolean noverification , String serviceName , String instance ) throws IOException { Avatar dest ; if ( Avatar . ACTIVE . toString ( ) . equalsIgnoreCase ( role ) ) { dest = Avatar . ACTIVE ; } else if ( Avatar . STANDBY . toString ( ) . equalsIgnoreCase ( role ) ) { throw new IOExc... | Sets the avatar to the specified value |
33,214 | public static boolean isServiceSpecified ( String command , Configuration conf , String [ ] argv ) { if ( conf . get ( FSConstants . DFS_FEDERATION_NAMESERVICES ) != null ) { for ( int i = 0 ; i < argv . length ; i ++ ) { if ( argv [ i ] . equals ( "-service" ) ) { return true ; } } printServiceErrorMessage ( command ,... | Checks if the service argument is specified in the command arguments . |
33,215 | private double calculateRate ( long cumulative , long currentTime ) { long timeSinceMapStart = 0 ; assert getPhase ( ) == Phase . MAP : "MapTaskStatus not in map phase!" ; long startTime = getStartTime ( ) ; timeSinceMapStart = currentTime - startTime ; if ( timeSinceMapStart <= 0 ) { LOG . error ( "Current time is " +... | Helper function that calculate the rate given the total so far and the current time |
33,216 | public boolean hasEnoughMemory ( ClusterNode node ) { int total = node . getTotal ( ) . memoryMB ; int free = node . getFree ( ) . memoryMB ; if ( free < nodeReservedMemoryMB ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( node . getHost ( ) + " not enough memory." + " totalMB:" + total + " free:" + free + " limit:... | Check if the node has enough memory to run tasks |
33,217 | private boolean hasEnoughDiskSpace ( ClusterNode node ) { int total = node . getTotal ( ) . diskGB ; int free = node . getFree ( ) . diskGB ; if ( free < nodeReservedDiskGB ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( node . getHost ( ) + " not enough disk space." + " totalMB:" + total + " free:" + free + " limi... | Check if the node has enough disk space to run tasks |
33,218 | public void selectiveClearing ( Key k , short scheme ) { if ( k == null ) { throw new NullPointerException ( "Key can not be null" ) ; } if ( ! membershipTest ( k ) ) { throw new IllegalArgumentException ( "Key is not a member" ) ; } int index = 0 ; int [ ] h = hash . hash ( k ) ; switch ( scheme ) { case RANDOM : inde... | Performs the selective clearing for a given key . |
33,219 | private int minimumFnRemove ( int [ ] h ) { int minIndex = Integer . MAX_VALUE ; double minValue = Double . MAX_VALUE ; for ( int i = 0 ; i < nbHash ; i ++ ) { double keyWeight = getWeight ( keyVector [ h [ i ] ] ) ; if ( keyWeight < minValue ) { minIndex = h [ i ] ; minValue = keyWeight ; } } return minIndex ; } | Chooses the bit position that minimizes the number of false negatives generated . |
33,220 | private int maximumFpRemove ( int [ ] h ) { int maxIndex = Integer . MIN_VALUE ; double maxValue = Double . MIN_VALUE ; for ( int i = 0 ; i < nbHash ; i ++ ) { double fpWeight = getWeight ( fpVector [ h [ i ] ] ) ; if ( fpWeight > maxValue ) { maxValue = fpWeight ; maxIndex = h [ i ] ; } } return maxIndex ; } | Chooses the bit position that maximizes the number of false positives removed . |
33,221 | private int ratioRemove ( int [ ] h ) { computeRatio ( ) ; int minIndex = Integer . MAX_VALUE ; double minValue = Double . MAX_VALUE ; for ( int i = 0 ; i < nbHash ; i ++ ) { if ( ratio [ h [ i ] ] < minValue ) { minValue = ratio [ h [ i ] ] ; minIndex = h [ i ] ; } } return minIndex ; } | Chooses the bit position that minimizes the number of false negatives generated while maximizing the number of false positives removed . |
33,222 | private void clearBit ( int index ) { if ( index < 0 || index >= vectorSize ) { throw new ArrayIndexOutOfBoundsException ( index ) ; } List < Key > kl = keyVector [ index ] ; List < Key > fpl = fpVector [ index ] ; int listSize = kl . size ( ) ; for ( int i = 0 ; i < listSize && ! kl . isEmpty ( ) ; i ++ ) { removeKey ... | Clears a specified bit in the bit vector and keeps up - to - date the KeyList vectors . |
33,223 | @ SuppressWarnings ( "unchecked" ) private void createVector ( ) { fpVector = new List [ vectorSize ] ; keyVector = new List [ vectorSize ] ; ratio = new double [ vectorSize ] ; for ( int i = 0 ; i < vectorSize ; i ++ ) { fpVector [ i ] = Collections . synchronizedList ( new ArrayList < Key > ( ) ) ; keyVector [ i ] = ... | Creates and initialises the various vectors . |
33,224 | void setEpoch ( long e ) { Preconditions . checkState ( ! isEpochEstablished ( ) , "Epoch already established: epoch=%s" , myEpoch ) ; myEpoch = e ; for ( AsyncLogger l : loggers ) { l . setEpoch ( e ) ; } } | Set the epoch number used for all future calls . |
33,225 | public void setCommittedTxId ( long txid , boolean force ) { for ( AsyncLogger logger : loggers ) { logger . setCommittedTxId ( txid , force ) ; } } | Set the highest successfully committed txid seen by the writer . This should be called after a successful write to a quorum and is used for extra sanity checks against the protocol . See HDFS - 3863 . |
33,226 | < V > Map < AsyncLogger , V > waitForWriteQuorum ( QuorumCall < AsyncLogger , V > q , int timeoutMs , String operationName ) throws IOException { int majority = getMajoritySize ( ) ; int numLoggers = loggers . size ( ) ; checkMajoritySize ( majority , numLoggers ) ; return waitForQuorumInternal ( q , loggers . size ( )... | Wait for a quorum of loggers to respond to the given call . If a quorum can t be achieved throws a QuorumException . |
33,227 | void appendHtmlReport ( StringBuilder sb ) { sb . append ( "<table class=\"storage\">" ) ; sb . append ( "<thead><tr><td>JN</td><td>Status</td></tr></thead>\n" ) ; for ( AsyncLogger l : loggers ) { sb . append ( "<tr>" ) ; sb . append ( "<td>" + JspUtil . escapeXml ( l . toString ( ) ) + "</td>" ) ; sb . append ( "<td>... | Append an HTML - formatted status readout on the current state of the underlying loggers . |
33,228 | public static void quoteHtmlChars ( OutputStream output , byte [ ] buffer , int off , int len ) throws IOException { for ( int i = off ; i < off + len ; i ++ ) { switch ( buffer [ i ] ) { case '&' : output . write ( ampBytes ) ; break ; case '<' : output . write ( ltBytes ) ; break ; case '>' : output . write ( gtBytes... | Quote all of the active HTML characters in the given string as they are added to the buffer . |
33,229 | public static String quoteHtmlChars ( String item ) { if ( item == null ) { return null ; } byte [ ] bytes = item . getBytes ( ) ; if ( needsQuoting ( bytes , 0 , bytes . length ) ) { ByteArrayOutputStream buffer = new ByteArrayOutputStream ( ) ; try { quoteHtmlChars ( buffer , bytes , 0 , bytes . length ) ; } catch ( ... | Quote the given item to make it html - safe . |
33,230 | public static OutputStream quoteOutputStream ( final OutputStream out ) throws IOException { return new OutputStream ( ) { private byte [ ] data = new byte [ 1 ] ; public void write ( byte [ ] data , int off , int len ) throws IOException { quoteHtmlChars ( out , data , off , len ) ; } public void write ( int b ) throw... | Return an output stream that quotes all of the output . |
33,231 | public static String unquoteHtmlChars ( String item ) { if ( item == null ) { return null ; } int next = item . indexOf ( '&' ) ; if ( next == - 1 ) { return item ; } int len = item . length ( ) ; int posn = 0 ; StringBuilder buffer = new StringBuilder ( ) ; while ( next != - 1 ) { buffer . append ( item . substring ( ... | Remove HTML quoting from a string . |
33,232 | public IndexRecord getIndexInformation ( String mapId , int reduce , Path fileName ) throws IOException { IndexInformation info = cache . get ( mapId ) ; if ( info == null ) { info = readIndexFileToCache ( fileName , mapId ) ; } else { synchronized ( info ) { while ( null == info . mapSpillRecord ) { try { info . wait ... | This method gets the index information for the given mapId and reduce . It reads the index file into cache if it is not already present . |
33,233 | public void removeMap ( String mapId ) { IndexInformation info = cache . remove ( mapId ) ; if ( info != null ) { totalMemoryUsed . addAndGet ( - info . getSize ( ) ) ; if ( ! queue . remove ( mapId ) ) { LOG . warn ( "Map ID" + mapId + " not found in queue!!" ) ; } } else { LOG . info ( "Map ID " + mapId + " not found... | This method removes the map from the cache . It should be called when a map output on this tracker is discarded . |
33,234 | private synchronized void freeIndexInformation ( ) { while ( totalMemoryUsed . get ( ) > totalMemoryAllowed ) { String s = queue . remove ( ) ; IndexInformation info = cache . remove ( s ) ; if ( info != null ) { totalMemoryUsed . addAndGet ( - info . getSize ( ) ) ; } } } | Bring memory usage below totalMemoryAllowed . |
33,235 | private List < TaskTrackerStatus > getStatusesOnHost ( String hostName ) { List < TaskTrackerStatus > statuses = new ArrayList < TaskTrackerStatus > ( ) ; synchronized ( taskTrackers ) { for ( TaskTracker tt : taskTrackers . values ( ) ) { TaskTrackerStatus status = tt . getStatus ( ) ; if ( hostName . equals ( status ... | Get all task tracker statuses on given host |
33,236 | void markCompletedTaskAttempt ( String taskTracker , TaskAttemptID taskid ) { Set < TaskAttemptID > taskset = trackerToMarkedTasksMap . get ( taskTracker ) ; if ( taskset == null ) { taskset = new TreeSet < TaskAttemptID > ( ) ; trackerToMarkedTasksMap . put ( taskTracker , taskset ) ; } taskset . add ( taskid ) ; LOG ... | Mark a task for removal later . This function assumes that the JobTracker is locked on entry . |
33,237 | void markCompletedJob ( JobInProgress job ) { for ( TaskInProgress tip : job . getTasks ( TaskType . JOB_SETUP ) ) { for ( TaskStatus taskStatus : tip . getTaskStatuses ( ) ) { if ( taskStatus . getRunState ( ) != TaskStatus . State . RUNNING && taskStatus . getRunState ( ) != TaskStatus . State . COMMIT_PENDING && tas... | Mark all non - running tasks of the job for pruning . This function assumes that the JobTracker is locked on entry . |
33,238 | synchronized public boolean isBlacklisted ( String trackerID ) { TaskTrackerStatus status = getTaskTrackerStatus ( trackerID ) ; if ( status != null ) { return faultyTrackers . isBlacklisted ( status . getHost ( ) ) ; } return false ; } | Whether the tracker is blacklisted or not |
33,239 | void addNewTracker ( TaskTracker taskTracker ) { TaskTrackerStatus status = taskTracker . getStatus ( ) ; trackerExpiryQueue . add ( status ) ; String hostname = status . getHost ( ) ; if ( getNode ( status . getTrackerName ( ) ) == null ) { resolveAndAddToTopology ( hostname ) ; } Set < TaskTracker > trackers = hostna... | Adds a new node to the jobtracker . It involves adding it to the expiry thread and adding it for resolution |
33,240 | public int getNextHeartbeatInterval ( ) { int clusterSize = getClusterStatus ( ) . getTaskTrackers ( ) ; int heartbeatInterval = Math . max ( ( int ) ( 1000 * HEARTBEATS_SCALING_FACTOR * Math . ceil ( ( double ) clusterSize / NUM_HEARTBEATS_IN_SECOND ) ) , HEARTBEAT_INTERVAL_MIN ) ; return heartbeatInterval ; } | Calculates next heartbeat interval using cluster size . Heartbeat interval is incremented by 1 second for every 100 nodes by default . |
33,241 | private boolean inHostsList ( TaskTrackerStatus status ) { Set < String > hostsList = hostsReader . getHosts ( ) ; return ( hostsList . isEmpty ( ) || hostsList . contains ( status . getHost ( ) ) ) ; } | Return if the specified tasktracker is in the hosts list if one was configured . If none was configured then this returns true . |
33,242 | private boolean inExcludedHostsList ( TaskTrackerStatus status ) { Set < String > excludeList = hostsReader . getExcludedHosts ( ) ; return excludeList . contains ( status . getHost ( ) ) ; } | Return if the specified tasktracker is in the exclude list . |
33,243 | void incrementReservations ( TaskType type , int reservedSlots ) { if ( type . equals ( TaskType . MAP ) ) { reservedMapSlots += reservedSlots ; } else if ( type . equals ( TaskType . REDUCE ) ) { reservedReduceSlots += reservedSlots ; } } | This method assumes the caller has JobTracker lock . |
33,244 | synchronized boolean processHeartbeat ( TaskTrackerStatus trackerStatus , boolean initialContact ) { String trackerName = trackerStatus . getTrackerName ( ) ; synchronized ( taskTrackers ) { synchronized ( trackerExpiryQueue ) { boolean seenBefore = updateTaskTrackerStatus ( trackerName , trackerStatus ) ; TaskTracker ... | Process incoming heartbeat messages from the task trackers . |
33,245 | private void addJobForCleanup ( JobID id ) { for ( String taskTracker : taskTrackers . keySet ( ) ) { LOG . debug ( "Marking job " + id + " for cleanup by tracker " + taskTracker ) ; synchronized ( trackerToJobsToCleanup ) { Set < JobID > jobsToKill = trackerToJobsToCleanup . get ( taskTracker ) ; if ( jobsToKill == nu... | Add a job to cleanup for the tracker . |
33,246 | private List < TaskTrackerAction > getJobsForCleanup ( String taskTracker ) { Set < JobID > jobs = null ; synchronized ( trackerToJobsToCleanup ) { jobs = trackerToJobsToCleanup . remove ( taskTracker ) ; } if ( jobs != null ) { List < TaskTrackerAction > killList = new ArrayList < TaskTrackerAction > ( ) ; for ( JobID... | A tracker wants to know if any job needs cleanup because the job completed . |
33,247 | List < Task > getSetupAndCleanupTasks ( TaskTrackerStatus taskTracker ) throws IOException { int maxMapTasks = taskScheduler . getMaxSlots ( taskTracker , TaskType . MAP ) ; int maxReduceTasks = taskScheduler . getMaxSlots ( taskTracker , TaskType . REDUCE ) ; int numMaps = taskTracker . countOccupiedMapSlots ( ) ; int... | returns cleanup tasks first then setup tasks . |
33,248 | public JobID getNewJobId ( ) throws IOException { JobID id = new JobID ( getTrackerIdentifier ( ) , nextJobId . getAndIncrement ( ) ) ; UserGroupInformation ugi = UserGroupInformation . getCurrentUGI ( ) ; jobToUserMap . put ( id , ugi . getUserName ( ) ) ; LOG . info ( "Job id " + id + " assigned to user " + ugi . get... | Allocates a new JobId string . |
33,249 | protected synchronized JobStatus addJob ( JobID jobId , JobInProgress job ) { totalSubmissions ++ ; synchronized ( jobs ) { synchronized ( taskScheduler ) { jobs . put ( job . getProfile ( ) . getJobID ( ) , job ) ; for ( JobInProgressListener listener : jobInProgressListeners ) { try { listener . jobAdded ( job ) ; } ... | Adds a job to the jobtracker . Make sure that the checks are inplace before adding a job . This is the core job submission logic |
33,250 | private void checkAccess ( JobInProgress job , QueueManager . QueueOperation oper ) throws IOException { UserGroupInformation ugi = UserGroupInformation . getCurrentUGI ( ) ; checkAccess ( job , oper , ugi ) ; } | Check access to operations related to the job . |
33,251 | private void checkAccess ( JobInProgress job , QueueManager . QueueOperation oper , UserGroupInformation ugi ) throws IOException { String queue = job . getProfile ( ) . getQueueName ( ) ; if ( ! queueManager . hasAccess ( queue , job , oper , ugi ) ) { throw new AccessControlException ( "User " + ugi . getUserName ( )... | use the passed ugi for checking the access |
33,252 | public synchronized void failJob ( JobInProgress job ) { if ( null == job ) { LOG . info ( "Fail on null job is not valid" ) ; return ; } JobStatus prevStatus = ( JobStatus ) job . getStatus ( ) . clone ( ) ; LOG . info ( "Failing job " + job . getJobID ( ) ) ; job . fail ( ) ; JobStatus newStatus = ( JobStatus ) job .... | Fail a job and inform the listeners . Other components in the framework should use this to fail a job . |
33,253 | public synchronized void setJobPriority ( JobID jobid , String priority ) throws IOException { JobInProgress job = jobs . get ( jobid ) ; if ( null == job ) { LOG . info ( "setJobPriority(): JobId " + jobid . toString ( ) + " is not a valid job" ) ; return ; } checkAccess ( job , QueueManager . QueueOperation . ADMINIS... | Set the priority of a job |
33,254 | TaskStatus [ ] getTaskStatuses ( TaskID tipid ) { TaskInProgress tip = getTip ( tipid ) ; return ( tip == null ? new TaskStatus [ 0 ] : tip . getTaskStatuses ( ) ) ; } | Get all the TaskStatuses from the tipid . |
33,255 | TaskStatus getTaskStatus ( TaskAttemptID taskid ) { TaskInProgress tip = getTip ( taskid . getTaskID ( ) ) ; return ( tip == null ? null : tip . getTaskStatus ( taskid ) ) ; } | Returns the TaskStatus for a particular taskid . |
33,256 | Counters getTipCounters ( TaskID tipid ) { TaskInProgress tip = getTip ( tipid ) ; return ( tip == null ? null : tip . getCounters ( ) ) ; } | Returns the counters for the specified task in progress . |
33,257 | synchronized void setJobPriority ( JobID jobId , JobPriority priority ) { JobInProgress job = jobs . get ( jobId ) ; if ( job != null ) { synchronized ( taskScheduler ) { JobStatus oldStatus = ( JobStatus ) job . getStatus ( ) . clone ( ) ; job . setPriority ( priority ) ; JobStatus newStatus = ( JobStatus ) job . getS... | Change the run - time priority of the given job . |
33,258 | void updateTaskStatuses ( TaskTrackerStatus status ) { String trackerName = status . getTrackerName ( ) ; for ( TaskStatus report : status . getTaskReports ( ) ) { report . setTaskTracker ( trackerName ) ; TaskAttemptID taskId = report . getTaskID ( ) ; if ( report . getRunState ( ) != TaskStatus . State . UNASSIGNED )... | Accept and process a new TaskTracker profile . We might have known about the TaskTracker previously or it might be brand - new . All task - tracker structures have already been updated . Just process the contained tasks and any jobs that might be affected . |
33,259 | void lostTaskTracker ( TaskTracker taskTracker ) { String trackerName = taskTracker . getTrackerName ( ) ; LOG . info ( "Lost tracker '" + trackerName + "'" ) ; synchronized ( trackerToJobsToCleanup ) { trackerToJobsToCleanup . remove ( trackerName ) ; } synchronized ( trackerToTasksToCleanup ) { trackerToTasksToCleanu... | We lost the task tracker! All task - tracker structures have already been updated . Just process the contained tasks and any jobs that might be affected . |
33,260 | private void removeTracker ( TaskTracker tracker ) { String trackerName = tracker . getTrackerName ( ) ; lostTaskTracker ( tracker ) ; TaskTrackerStatus status = tracker . getStatus ( ) ; if ( isBlacklisted ( trackerName ) ) { faultyTrackers . decrBlackListedTrackers ( 1 ) ; } updateTaskTrackerStatus ( trackerName , nu... | Remove a tracker from the system |
33,261 | public static void main ( String argv [ ] ) throws IOException , InterruptedException { StringUtils . startupShutdownMessage ( JobTracker . class , argv , LOG ) ; try { if ( argv . length == 0 ) { JobTracker tracker = startTracker ( new JobConf ( ) ) ; tracker . offerService ( ) ; return ; } if ( "-instance" . equals (... | Start the JobTracker process . This is used only for debugging . As a rule JobTracker should be run as part of the DFS Namenode process . |
33,262 | private static void dumpConfiguration ( Writer writer ) throws IOException { Configuration . dumpConfiguration ( new JobConf ( ) , writer ) ; writer . write ( "\n" ) ; QueueManager . dumpConfiguration ( writer ) ; writer . write ( "\n" ) ; } | Dumps the configuration properties in Json format |
33,263 | private void checkMemoryRequirements ( JobInProgress job ) throws IOException { if ( ! perTaskMemoryConfigurationSetOnJT ( ) ) { LOG . debug ( "Per-Task memory configuration is not set on JT. " + "Not checking the job for invalid memory requirements." ) ; return ; } boolean invalidJob = false ; String msg = "" ; long m... | Check the job if it has invalid requirements and throw an IOException if it does have . |
33,264 | private void updateTotalTaskCapacity ( TaskTrackerStatus status ) { int mapSlots = taskScheduler . getMaxSlots ( status , TaskType . MAP ) ; String trackerName = status . getTrackerName ( ) ; Integer oldMapSlots = trackerNameToMapSlots . get ( trackerName ) ; if ( oldMapSlots == null ) { oldMapSlots = 0 ; } int delta =... | Update totalMapTaskCapacity and totalReduceTaskCapacity to resolve the number of slots changed in a tasktracker . The change could be from TaskScheduler or TaskTrackerStatus |
33,265 | private void removeTaskTrackerCapacity ( TaskTrackerStatus status ) { Integer mapSlots = trackerNameToMapSlots . remove ( status . getTrackerName ( ) ) ; if ( mapSlots == null ) { mapSlots = 0 ; } totalMapTaskCapacity -= mapSlots ; Integer reduceSlots = trackerNameToReduceSlots . remove ( status . getTrackerName ( ) ) ... | Update totalMapTaskCapacity and totalReduceTaskCapacity for removing a tasktracker |
33,266 | void recoverSegments ( SyncTask task ) throws IOException { if ( ! prepareRecovery ( task ) ) { return ; } for ( InetSocketAddress jn : journalNodes ) { if ( isLocalIpAddress ( jn . getAddress ( ) ) && jn . getPort ( ) == journalNode . getPort ( ) ) { continue ; } try { List < EditLogFile > remoteLogFiles = getManifest... | Recovers a single segment |
33,267 | private List < EditLogFile > getManifest ( InetSocketAddress jn , Journal journal , long minTxId ) throws IOException { String m = DFSUtil . getHTMLContentWithTimeout ( new URL ( "http" , jn . getAddress ( ) . getHostAddress ( ) , jn . getPort ( ) , GetJournalManifestServlet . buildPath ( journal . getJournalId ( ) , m... | Fetch manifest from a single given journal node over http . |
33,268 | public static List < EditLogFile > convertJsonToListManifest ( String json ) throws IOException { if ( json == null || json . isEmpty ( ) ) { return new ArrayList < EditLogFile > ( ) ; } TypeReference < List < String > > type = new TypeReference < List < String > > ( ) { } ; List < String > logFilesDesc = mapper . read... | Get the list corresponding to the JSON string . |
33,269 | private boolean isLocalIpAddress ( InetAddress addr ) { if ( addr . isAnyLocalAddress ( ) || addr . isLoopbackAddress ( ) ) return true ; try { return NetworkInterface . getByInetAddress ( addr ) != null ; } catch ( SocketException e ) { return false ; } } | Checks if the address is local . |
33,270 | synchronized void refreshAcls ( Configuration conf ) throws IOException { try { HashMap < String , AccessControlList > newAclsMap = getQueueAcls ( conf ) ; aclsMap = newAclsMap ; } catch ( Throwable t ) { String exceptionString = StringUtils . stringifyException ( t ) ; LOG . warn ( "Queue ACLs could not be refreshed b... | Refresh the acls for the configured queues in the system by reading it from mapred - queue - acls . xml . |
33,271 | public Socket get ( SocketAddress remote ) { synchronized ( multimap ) { List < Socket > sockList = multimap . get ( remote ) ; if ( sockList == null ) { return null ; } Iterator < Socket > iter = sockList . iterator ( ) ; while ( iter . hasNext ( ) ) { Socket candidate = iter . next ( ) ; iter . remove ( ) ; if ( ! ca... | Get a cached socket to the given address |
33,272 | public void put ( Socket sock ) { Preconditions . checkNotNull ( sock ) ; SocketAddress remoteAddr = sock . getRemoteSocketAddress ( ) ; if ( remoteAddr == null ) { LOG . warn ( "Cannot cache (unconnected) socket with no remote address: " + sock ) ; IOUtils . closeSocket ( sock ) ; return ; } Socket oldestSock = null ;... | Give an unused socket to the cache . |
33,273 | private Socket evictOldest ( ) { Iterator < Entry < SocketAddress , Socket > > iter = multimap . entries ( ) . iterator ( ) ; if ( ! iter . hasNext ( ) ) { throw new IllegalArgumentException ( "Cannot evict from empty cache!" ) ; } Entry < SocketAddress , Socket > entry = iter . next ( ) ; iter . remove ( ) ; return en... | Evict the oldest entry in the cache . |
33,274 | public void clear ( ) { List < Socket > socketsToClear = new LinkedList < Socket > ( ) ; synchronized ( multimap ) { for ( Socket sock : multimap . values ( ) ) { socketsToClear . add ( sock ) ; } multimap . clear ( ) ; } for ( Socket sock : socketsToClear ) { IOUtils . closeSocket ( sock ) ; } } | Empty the cache and close all sockets . |
33,275 | public static void writeFile ( File file , long val ) throws IOException { AtomicFileOutputStream fos = new AtomicFileOutputStream ( file ) ; try { fos . write ( String . valueOf ( val ) . getBytes ( Charsets . UTF_8 ) ) ; fos . write ( '\n' ) ; fos . close ( ) ; fos = null ; } finally { if ( fos != null ) { fos . abor... | Atomically write the given value to the given file including fsyncing . |
33,276 | static public String normalize ( String path ) { if ( path == null || path . length ( ) == 0 ) return ROOT ; if ( path . charAt ( 0 ) != PATH_SEPARATOR ) { throw new IllegalArgumentException ( "Network Location path does not start with " + PATH_SEPARATOR_STR + ": " + path ) ; } int len = path . length ( ) ; if ( path .... | Normalize a path |
33,277 | private void recoverClusterManagerFromDisk ( HostsFileReader hostsReader ) throws IOException { LOG . info ( "Restoring state from " + new java . io . File ( conf . getCMStateFile ( ) ) . getAbsolutePath ( ) ) ; safeMode = true ; LOG . info ( "Safe mode is now: " + ( this . safeMode ? "ON" : "OFF" ) ) ; CoronaSerialize... | This method is used when the ClusterManager is restarting after going down while in Safe Mode . It starts the process of recovering the original CM state by reading back the state in JSON form . |
33,278 | protected void initLegalTypes ( ) { Map < Integer , Map < ResourceType , Integer > > cpuToResourcePartitioning = conf . getCpuToResourcePartitioning ( ) ; for ( Map . Entry < Integer , Map < ResourceType , Integer > > entry : cpuToResourcePartitioning . entrySet ( ) ) { for ( ResourceType type : entry . getValue ( ) . ... | Prepare the legal types allowed based on the resources available |
33,279 | protected boolean checkResourceRequestType ( List < ResourceRequest > requestList ) { for ( ResourceRequest req : requestList ) { if ( ! legalTypeSet . contains ( req . type ) ) { return false ; } } return true ; } | Check all the resource requests and ensure that they are legal . |
33,280 | protected void checkResourceRequestLimit ( List < ResourceRequest > requestList , String handle ) throws InvalidSessionHandle { ConfigManager configManager = getScheduler ( ) . getConfigManager ( ) ; Session session = sessionManager . getSession ( handle ) ; PoolInfo poolInfo = session . getPoolInfo ( ) ; if ( ! config... | Count the resources requested and fail the job if they are above the limit |
33,281 | public synchronized boolean setSafeMode ( boolean safeMode ) { if ( safeMode == false ) { LOG . info ( "Resetting the heartbeat times for all sessions" ) ; sessionManager . resetSessionsLastHeartbeatTime ( ) ; LOG . info ( "Resetting the heartbeat times for all nodes" ) ; nodeManager . resetNodesLastHeartbeatTime ( ) ;... | Sets the Safe Mode flag on the Cluster Manager and on the ProxyJobTracker . If we fail to set the flag on the ProxyJobTracker return false which signals that setting the flag on the ProxyJobTracker failed . In that case we should run coronaadmin with the - forceSetSafeModeOnPJT or - forceUnsetSafeModeOnPJT options . |
33,282 | public boolean persistState ( ) { if ( ! safeMode ) { LOG . info ( "Cannot persist state because ClusterManager is not in Safe Mode" ) ; return false ; } try { JsonGenerator jsonGenerator = CoronaSerializer . createJsonGenerator ( conf ) ; jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeFieldName ( "startTi... | This function saves the state of the ClusterManager to disk . |
33,283 | public void nodeTimeout ( String nodeName ) { if ( nodeRestarter != null ) { nodeRestarter . delete ( nodeName ) ; } Set < String > sessions = nodeManager . getNodeSessions ( nodeName ) ; Set < ClusterNode . GrantId > grantsToRevoke = nodeManager . deleteNode ( nodeName ) ; if ( grantsToRevoke == null ) { return ; } ha... | This is an internal api called to tell the cluster manager that a particular node seems dysfunctional and that it should be removed from the cluster .
33,284 | public void nodeAppRemoved ( String nodeName , ResourceType type ) { Set < String > sessions = nodeManager . getNodeSessions ( nodeName ) ; Set < ClusterNode . GrantId > grantsToRevoke = nodeManager . deleteAppFromNode ( nodeName , type ) ; if ( grantsToRevoke == null ) { return ; } Set < String > affectedSessions = ne... | This is an internal api called to tell the cluster manager that a particular type of resource is no longer available on a node . |
33,285 | private void handleRevokedGrants ( String nodeName , Set < ClusterNode . GrantId > grantsToRevoke ) { for ( ClusterNode . GrantId grantId : grantsToRevoke ) { String sessionHandle = grantId . getSessionId ( ) ; try { sessionManager . revokeResource ( sessionHandle , Collections . singletonList ( grantId . getRequestId ... | Process the grants removed from a node . |
33,286 | private void handleDeadNode ( String nodeName , Set < String > sessions ) { LOG . info ( "Notify sessions: " + sessions + " about dead node " + nodeName ) ; for ( String session : sessions ) { sessionNotifier . notifyDeadNode ( session , nodeName ) ; } } | All the sessions that had grants on this node should get notified |
33,287 | private void updateCurrentThreadName ( String status ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "DataXceiver for client " ) ; InetAddress ia ; if ( s != null && ( ia = s . getInetAddress ( ) ) != null ) { sb . append ( ia . toString ( ) ) ; } else { sb . append ( "unknown" ) ; } if ( status != null ) ... | Update the thread name to contain the current status . Use this only after this receiver has started on its thread i . e . outside the constructor . |
33,288 | void readMetadata ( DataInputStream in , VersionAndOpcode versionAndOpcode ) throws IOException { ReadMetadataHeader readMetadataHeader = new ReadMetadataHeader ( versionAndOpcode ) ; readMetadataHeader . readFields ( in ) ; final int namespaceId = readMetadataHeader . getNamespaceId ( ) ; Block block = new Block ( rea... | Reads the metadata and sends the data in one DATA_CHUNK . |
33,289 | void getBlockCrc ( DataInputStream in , VersionAndOpcode versionAndOpcode ) throws IOException { BlockChecksumHeader blockChecksumHeader = new BlockChecksumHeader ( versionAndOpcode ) ; blockChecksumHeader . readFields ( in ) ; final int namespaceId = blockChecksumHeader . getNamespaceId ( ) ; final Block block = new B... | Get block data s CRC32 checksum . |
33,290 | private void copyBlock ( DataInputStream in , VersionAndOpcode versionAndOpcode ) throws IOException { CopyBlockHeader copyBlockHeader = new CopyBlockHeader ( versionAndOpcode ) ; copyBlockHeader . readFields ( in ) ; long startTime = System . currentTimeMillis ( ) ; int namespaceId = copyBlockHeader . getNamespaceId (... | Read a block from the disk and then sends it to a destination . |
33,291 | private void sendResponse ( Socket s , short opStatus , long timeout ) throws IOException { DataOutputStream reply = new DataOutputStream ( NetUtils . getOutputStream ( s , timeout ) ) ; reply . writeShort ( opStatus ) ; reply . flush ( ) ; } | Utility function for sending a response . |
33,292 | public T getNext ( ) throws IOException { try { return mapper . readValue ( jsonParser , clazz ) ; } catch ( EOFException e ) { return null ; } } | Get the next object from the trace . |
33,293 | public static List < List < Object > > runInsertSelect ( DBConnectionFactory connectionFactory , String sql , List < Object > sqlParams , boolean isWrite , int numRetries , int retryMaxInternalSec , boolean insert , boolean getGeneratedKeys ) throws IOException { int waitMS = 3000 ; for ( int i = 0 ; i < numRetries ; +... | Returns the rows returned by the query . In the case of an insert returns null .
33,294 | public void process ( DocumentAndOp doc , Analyzer analyzer ) throws IOException { if ( doc . getOp ( ) == DocumentAndOp . Op . DELETE || doc . getOp ( ) == DocumentAndOp . Op . UPDATE ) { deleteList . add ( doc . getTerm ( ) ) ; } if ( doc . getOp ( ) == DocumentAndOp . Op . INSERT || doc . getOp ( ) == DocumentAndOp ... | This method is used by the index update mapper and processes a document operation into the current intermediate form .
33,295 | protected void ensureInflated ( ) { if ( compressed != null ) { try { ByteArrayInputStream deflated = new ByteArrayInputStream ( compressed ) ; DataInput inflater = new DataInputStream ( new InflaterInputStream ( deflated ) ) ; readFieldsCompressed ( inflater ) ; compressed = null ; } catch ( IOException e ) { throw ne... | Must be called by all methods which access fields to ensure that the data has been uncompressed . |
33,296 | public static synchronized UnixUserGroupInformation getUgiForUser ( String userName ) { long now = System . currentTimeMillis ( ) ; long cutoffTime = now - ugiLifetime ; CachedUgi cachedUgi = ugiCache . get ( userName ) ; if ( cachedUgi != null && cachedUgi . getInitTime ( ) > cutoffTime ) return cachedUgi . getUgi ( )... | retrieve an ugi for a user . try the cache first if not found get it by running a shell command |
33,297 | static synchronized void saveToCache ( UnixUserGroupInformation ugi ) { ugiCache . put ( ugi . getUserName ( ) , new CachedUgi ( ugi , System . currentTimeMillis ( ) ) ) ; } | save an ugi to cache only for junit testing purposes |
33,298 | private static UnixUserGroupInformation getUgi ( String userName ) throws IOException { if ( userName == null || ! USERNAME_PATTERN . matcher ( userName ) . matches ( ) ) throw new IOException ( "Invalid username=" + userName ) ; String [ ] cmd = new String [ ] { "bash" , "-c" , "id -Gn '" + userName + "'" } ; String [... | Get the ugi for a user by running shell command id - Gn |
33,299 | void start ( ) { if ( ! shouldRun ( conf ) ) { LOG . info ( "Not starting node health monitor" ) ; return ; } nodeHealthScriptScheduler = new Timer ( "NodeHealthMonitor-Timer" , true ) ; nodeHealthScriptScheduler . scheduleAtFixedRate ( timer , 0 , intervalTime ) ; } | Method used to start the Node health monitoring . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.