idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
32,700 | private void visitTxId ( ) throws IOException { if ( LayoutVersion . supports ( Feature . STORED_TXIDS , editsVersion ) ) { v . visitLong ( EditsElement . TRANSACTION_ID ) ; } } | Visit a transaction ID if the log version supports it . |
32,701 | private void visit_OP_ADD_or_OP_CLOSE ( FSEditLogOpCodes editsOpCode ) throws IOException { visitTxId ( ) ; if ( ! LayoutVersion . supports ( Feature . EDITLOG_OP_OPTIMIZATION , editsVersion ) ) { IntToken opAddLength = v . visitInt ( EditsElement . LENGTH ) ; if ( opAddLength . value == 0 ) { throw new IOException ( "... | Visit OP_ADD and OP_CLOSE they are almost the same |
32,702 | public void loadEdits ( ) throws IOException { try { v . start ( ) ; v . visitEnclosingElement ( EditsElement . EDITS ) ; IntToken editsVersionToken = v . visitInt ( EditsElement . EDITS_VERSION ) ; editsVersion = editsVersionToken . value ; if ( ! canLoadVersion ( editsVersion ) ) { throw new IOException ( "Cannot pro... | Loads edits file uses visitor to process all elements |
32,703 | private static ResourceBundle getResourceBundle ( String enumClassName ) { String bundleName = enumClassName . replace ( '$' , '_' ) ; return ResourceBundle . getBundle ( bundleName ) ; } | Returns the specified resource bundle or throws an exception . |
32,704 | protected Counter findCounter ( String counterName , String displayName ) { Counter result = counters . get ( counterName ) ; if ( result == null ) { result = new Counter ( counterName , displayName ) ; counters . put ( counterName , result ) ; } return result ; } | Internal to find a counter in a group . |
32,705 | private String localize ( String key , String defaultValue ) { String result = defaultValue ; if ( bundle != null ) { try { result = bundle . getString ( key ) ; } catch ( MissingResourceException mre ) { } } return result ; } | Looks up key in the ResourceBundle and returns the corresponding value . If the bundle or the key doesn t exist returns the default value . |
32,706 | public static String getDefaultAddress ( Configuration conf ) { URI uri = FileSystem . getDefaultUri ( conf ) ; String authority = uri . getAuthority ( ) ; if ( authority == null ) { throw new IllegalArgumentException ( String . format ( "Invalid URI for NameNode address (check %s): %s has no authority." , FileSystem .... | Returns the fs . default name from the configuration as a string for znode name retrieval without a DNS lookup . |
32,707 | protected void initialize ( ) throws IOException { if ( serviceAuthEnabled = getConf ( ) . getBoolean ( ServiceAuthorizationManager . SERVICE_AUTHORIZATION_CONFIG , false ) ) { PolicyProvider policyProvider = ( PolicyProvider ) ( ReflectionUtils . newInstance ( getConf ( ) . getClass ( PolicyProvider . POLICY_PROVIDER_... | Initialize name - node . |
32,708 | protected static void adjustMetaDirectoryNames ( Configuration conf , String serviceKey ) { adjustMetaDirectoryName ( conf , DFS_NAMENODE_NAME_DIR_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_NAMENODE_EDITS_DIR_KEY , serviceKey ) ; adjustMetaDirectoryName ( conf , DFS_NAMENODE_CHECKPOINT_DIR_KEY , serviceK... | Append service name to each meta directory name |
32,709 | protected void stopRPC ( boolean interruptClientHandlers ) throws IOException , InterruptedException { stopRPCInternal ( server , "client" , interruptClientHandlers ) ; stopRPCInternal ( dnProtocolServer , "datanode" , interruptClientHandlers ) ; stopWaitRPCInternal ( server , "client" ) ; stopWaitRPCInternal ( dnProto... | Quiesces all communication to namenode cleanly . Ensures all RPC handlers have exited . |
32,710 | public void stop ( ) { if ( stopRequested ) return ; stopRequested = true ; LOG . info ( "Stopping http server" ) ; try { if ( httpServer != null ) httpServer . stop ( ) ; } catch ( Exception e ) { LOG . error ( StringUtils . stringifyException ( e ) ) ; } LOG . info ( "Stopping namesystem" ) ; if ( namesystem != null ... | Stop all NameNode threads and wait for all to finish . |
32,711 | public LocatedBlock addBlock ( String src , String clientName ) throws IOException { return addBlock ( src , clientName , null ) ; } | Stub for 0 . 20 clients that don t support HDFS - 630 |
32,712 | public void abandonBlock ( Block b , String src , String holder ) throws IOException { abandonBlockInternal ( b , src , holder ) ; } | The client needs to give up on the block . |
32,713 | public DatanodeCommand [ ] sendHeartbeat ( DatanodeRegistration nodeReg , long capacity , long dfsUsed , long remaining , long namespaceUsed , int xmitsInProgress , int xceiverCount ) throws IOException { verifyRequest ( nodeReg ) ; myMetrics . numHeartbeat . inc ( ) ; return namesystem . handleHeartbeat ( nodeReg , ca... | Data node notifies the name node that it is alive Return an array of block - oriented commands for the datanode to execute . This will be either a transfer or a delete operation . |
32,714 | public void blocksBeingWrittenReport ( DatanodeRegistration nodeReg , BlockReport blocks ) throws IOException { verifyRequest ( nodeReg ) ; long [ ] blocksAsLong = blocks . getBlockReportInLongs ( ) ; BlockListAsLongs blist = new BlockListAsLongs ( blocksAsLong ) ; boolean processed = namesystem . processBlocksBeingWri... | add new replica blocks to the Inode to target mapping also add the Inode file to DataNodeDesc |
32,715 | public void verifyRequest ( DatanodeRegistration nodeReg ) throws IOException { verifyVersion ( nodeReg . getVersion ( ) , LAYOUT_VERSION , "layout" ) ; if ( getNamespaceID ( ) != nodeReg . storageInfo . namespaceID || getCTime ( ) < nodeReg . storageInfo . cTime ) { LOG . warn ( "Invalid Request : NN namespaceId, cTim... | Verify request . |
32,716 | public static void verifyVersion ( int reportedVersion , int expectedVersion , String annotation ) throws IOException { if ( ( reportedVersion ^ expectedVersion ) < 0 ) { throw new IOException ( "reportedVersion and expectedVersion have" + " different signs : " + reportedVersion + ", " + expectedVersion ) ; } if ( Math... | Verify version . |
32,717 | public List < FileStatusExtended > getRandomFilesSample ( double percentage ) { if ( ! ( percentage > 0 && percentage <= 1.0 ) ) { throw new IllegalArgumentException ( "Invalid percentage : " + percentage + " value should be between (0 - 1.0]" ) ; } LOG . info ( "Sampling : " + ( percentage * 100 ) + " percent of files... | Get a sample of the total files in the FileSystem . The sampling is done randomly . |
32,718 | static boolean format ( Configuration conf , boolean force , boolean isConfirmationNeeded ) throws IOException { boolean allowFormat = conf . getBoolean ( "dfs.namenode.support.allowformat" , true ) ; if ( ! allowFormat ) { throw new IOException ( "The option dfs.namenode.support.allowformat is " + "set to false for th... | Verify that configured directories exist then Interactively confirm that formatting is desired for each existing directory and format them . |
32,719 | public static boolean validateServiceName ( Configuration conf , String nameServiceId ) { Collection < String > nameserviceIds = DFSUtil . getNameServiceIds ( conf ) ; if ( nameserviceIds != null && ! nameserviceIds . isEmpty ( ) ) { if ( nameServiceId == null ) { System . err . println ( "Need to input a nameservice i... | Validate if the input service name is valid |
32,720 | protected void validateCheckpointerAddress ( InetAddress configuredRemoteAddress ) throws IOException { InetAddress remoteAddress = Server . getRemoteIp ( ) ; InjectionHandler . processEvent ( InjectionEvent . NAMENODE_VERIFY_CHECKPOINTER , remoteAddress ) ; LOG . info ( "Verify: received request from: " + remoteAddres... | Checks if the ip of the caller is equal to the given configured address . |
32,721 | public static boolean checkFile ( Configuration conf , FileSystem srcFs , FileSystem parityFs , Path srcPath , Path parityPath , Codec codec , Progressable reporter , boolean sourceOnly ) throws IOException , InterruptedException { FileStatus stat = srcFs . getFileStatus ( srcPath ) ; long blockSize = stat . getBlockSi... | Check a file . |
32,722 | private static boolean verifyFile ( Configuration conf , FileSystem srcFs , FileSystem parityFs , FileStatus stat , Path parityPath , Codec codec , long blockOffset , Progressable reporter ) throws IOException , InterruptedException { Path srcPath = stat . getPath ( ) ; LOG . info ( "Verify file: " + srcPath + " at off... | Verify the certain offset of a file . |
32,723 | protected void addLocatedInputPathRecursively ( List < LocatedFileStatus > result , FileSystem fs , Path path , PathFilter inputFilter ) throws IOException { for ( RemoteIterator < LocatedFileStatus > itor = fs . listLocatedStatus ( path , inputFilter ) ; itor . hasNext ( ) ; ) { LocatedFileStatus stat = itor . next ( ... | Add files in the input path recursively into the results . |
32,724 | private static String [ ] getPathStrings ( String commaSeparatedPaths ) { int length = commaSeparatedPaths . length ( ) ; int curlyOpen = 0 ; int pathStart = 0 ; boolean globPattern = false ; List < String > pathStrings = new ArrayList < String > ( ) ; for ( int i = 0 ; i < length ; i ++ ) { char ch = commaSeparatedPat... | This method escapes commas in the glob pattern of the given paths . |
32,725 | protected String [ ] getSplitHosts ( BlockLocation [ ] blkLocations , long offset , long splitSize , NetworkTopology clusterMap ) throws IOException { int startIndex = getBlockIndex ( blkLocations , offset ) ; long bytesInThisBlock = blkLocations [ startIndex ] . getOffset ( ) + blkLocations [ startIndex ] . getLength ... | This function identifies and returns the hosts that contribute most for a given split . For calculating the contribution rack locality is treated on par with host locality so hosts from racks that contribute the most are preferred over hosts on racks that contribute less |
32,726 | @ SuppressWarnings ( "deprecation" ) public void killJob ( JobID jobId , Map < String , InetAddress > allTrackers ) { for ( Map . Entry < String , InetAddress > entry : allTrackers . entrySet ( ) ) { String trackerName = entry . getKey ( ) ; InetAddress addr = entry . getValue ( ) ; String description = "KillJobAction ... | Enqueue an action to kill the job . |
32,727 | public void killTasks ( String trackerName , InetAddress addr , List < KillTaskAction > killActions ) { for ( KillTaskAction killAction : killActions ) { String description = "KillTaskAction " + killAction . getTaskID ( ) ; LOG . info ( "Queueing " + description + " to worker " + trackerName + "(" + addr . host + ":" +... | Enqueue kill tasks actions . |
32,728 | public void commitTask ( String trackerName , InetAddress addr , CommitTaskAction action ) { String description = "KillTaskAction " + action . getTaskID ( ) ; LOG . info ( "Queueing " + description + " to worker " + trackerName + "(" + addr . host + ":" + addr . port + ")" ) ; allWorkQueues . enqueueAction ( new Action... | Enqueue a commit task action . |
32,729 | public void launchTask ( Task task , String trackerName , InetAddress addr ) { CoronaSessionInfo info = new CoronaSessionInfo ( coronaJT . getSessionId ( ) , coronaJT . getJobTrackerAddress ( ) , coronaJT . getSecondaryTrackerAddress ( ) ) ; LaunchTaskAction action = new LaunchTaskAction ( task , info ) ; String descri... | Enqueue a launch task action . |
32,730 | boolean connectionStateChanged ( int newState ) { switch ( newState ) { case ConnectionManager . CONNECTED : LOG . info ( listeningPort + ": Switched to CONNECTED state." ) ; try { return resubscribe ( ) ; } catch ( Exception e ) { LOG . error ( listeningPort + ": Resubscribing failed" , e ) ; return false ; } case Con... | Called by the ConnectionManager when the connection state changed . The connection lock is hold when calling this method so no other methods from the ConnectionManager should be called here . |
32,731 | public void removeWatch ( String path , EventType watchType ) throws NotConnectedToServerException , InterruptedException , WatchNotPlacedException { NamespaceEvent event = new NamespaceEvent ( path , watchType . getByteValue ( ) ) ; NamespaceEventKey eventKey = new NamespaceEventKey ( path , watchType ) ; Object conne... | Removes a previously placed watch for a particular event type from the given path . If the watch is not actually present at that path before calling the method nothing will happen . |
32,732 | public boolean haveWatch ( String path , EventType watchType ) { return watchedEvents . containsKey ( new NamespaceEventKey ( path , watchType ) ) ; } | Tests if a watch is placed at the given path and of the given type . |
32,733 | private boolean resubscribe ( ) throws TransactionIdTooOldException , InterruptedException { for ( NamespaceEventKey eventKey : watchedEvents . keySet ( ) ) { NamespaceEvent event = eventKey . getEvent ( ) ; if ( ! subscribe ( event . getPath ( ) , EventType . fromByteValue ( event . getType ( ) ) , watchedEvents . get... | Called right after a reconnect to resubscribe to all events . Must be called with the connection lock acquired . |
32,734 | void waitForTransparentConnect ( ) throws InterruptedException , NotConnectedToServerException { if ( state == DISCONNECTED_VISIBLE ) { LOG . warn ( listeningPort + ": waitForTransparentConnect: got visible" + " disconnected state" ) ; throw new NotConnectedToServerException ( ) ; } while ( state != CONNECTED ) { conne... | Must be called holding the connection lock returned by getConnectionLock . It waits until the current connection state is CONNECTED . If it ever gets to DISCONNECTED_VISIBLE it will raise an exception . If the current state is CONNECTED then it will return without waiting . |
32,735 | public boolean isMethodSupported ( String methodName , Class < ? > ... parameterTypes ) throws IOException { if ( serverMethods == null ) { return true ; } Method method ; try { method = protocol . getDeclaredMethod ( methodName , parameterTypes ) ; } catch ( SecurityException e ) { throw new IOException ( e ) ; } catc... | Check if a method is supported by the server or not |
32,736 | public static void writePartitionFile ( JobConf conf , Path partFile ) throws IOException { TeraInputFormat inFormat = new TeraInputFormat ( ) ; TextSampler sampler = new TextSampler ( ) ; Text key = new Text ( ) ; Text value = new Text ( ) ; int partitions = conf . getNumReduceTasks ( ) ; long sampleSize = conf . getL... | Use the input splits to take samples of the input and generate sample keys . By default reads 100 000 keys from 10 locations in the input sorts them and picks N - 1 keys to generate N equally sized partitions . |
32,737 | public static void setConf ( Object theObject , Configuration conf , boolean supportJobConf ) { if ( conf != null ) { if ( theObject instanceof Configurable ) { ( ( Configurable ) theObject ) . setConf ( conf ) ; } if ( supportJobConf ) { setJobConf ( theObject , conf ) ; } } } | Check and set configuration if necessary . |
32,738 | public static < T > T newInstance ( Class < T > theClass , Class < ? > [ ] parameterTypes , Object [ ] initargs ) { if ( parameterTypes . length != initargs . length ) { throw new IllegalArgumentException ( "Constructor parameter types don't match constructor arguments" ) ; } for ( int i = 0 ; i < parameterTypes . leng... | Create an object for the given class . |
32,739 | public static void logThreadInfo ( Log log , String title , long minInterval ) { boolean dumpStack = false ; if ( log . isInfoEnabled ( ) ) { synchronized ( ReflectionUtils . class ) { long now = System . currentTimeMillis ( ) ; if ( now - previousLogTime >= minInterval * 1000 ) { previousLogTime = now ; dumpStack = tr... | Log the current thread stacks at INFO level . |
32,740 | public static void backupFiles ( FileSystem fs , File dest , Configuration conf ) throws IOException { cleanUpAndCheckBackup ( conf , dest ) ; int MAX_ATTEMPT = 3 ; for ( int i = 0 ; i < MAX_ATTEMPT ; i ++ ) { try { String mdate = dateForm . get ( ) . format ( new Date ( System . currentTimeMillis ( ) ) ) ; if ( dest .... | Backup given directory . Enforce that max number of backups has not been reached . |
32,741 | static void cleanUpAndCheckBackup ( Configuration conf , File origin ) throws IOException { String [ ] backups = getBackups ( origin ) ; File root = origin . getParentFile ( ) ; int copiesToKeep = conf . getInt ( NN_IMAGE_COPIES_TOKEEP , NN_IMAGE_COPIES_TOKEEP_DEFAULT ) ; int daysToKeep = conf . getInt ( NN_IMAGE_DAYS_... | Check if we have not exceeded the maximum number of backups . |
32,742 | static void deleteOldBackups ( File root , String [ ] backups , int daysToKeep , int copiesToKeep ) { Date now = new Date ( System . currentTimeMillis ( ) ) ; int maxIndex = Math . max ( 0 , backups . length - copiesToKeep + 1 ) ; for ( int i = 0 ; i < maxIndex ; i ++ ) { String backup = backups [ i ] ; Date backupDate... | Delete backups according to the retention policy . |
32,743 | static String [ ] getBackups ( File origin ) { File root = origin . getParentFile ( ) ; final String originName = origin . getName ( ) ; String [ ] backups = root . list ( new FilenameFilter ( ) { public boolean accept ( File dir , String name ) { if ( ! name . startsWith ( originName + File . pathSeparator ) || name .... | List all directories that match the backup pattern . Sort from oldest to newest . |
32,744 | void doImportCheckpoint ( ) throws IOException { Collection < URI > checkpointDirs = NNStorageConfiguration . getCheckpointDirs ( conf , null ) ; Collection < URI > checkpointEditsDirs = NNStorageConfiguration . getCheckpointEditsDirs ( conf , null ) ; if ( checkpointDirs == null || checkpointDirs . isEmpty ( ) ) { thr... | Load image from a checkpoint directory and save it into the current one . |
32,745 | protected void loadFSImage ( ImageInputStream iis , File imageFile ) throws IOException { MD5Hash expectedMD5 = MD5FileUtils . readStoredMd5ForFile ( imageFile ) ; if ( expectedMD5 == null ) { throw new IOException ( "No MD5 file found corresponding to image file " + imageFile ) ; } iis . setImageDigest ( expectedMD5 )... | Load the image namespace from the given image file verifying it against the MD5 sum stored in its associated . md5 file . |
32,746 | String getParent ( String path ) { return path . substring ( 0 , path . lastIndexOf ( Path . SEPARATOR ) ) ; } | Return string representing the parent of the given path . |
32,747 | protected long loadEdits ( Iterable < EditLogInputStream > editStreams ) throws IOException { long lastAppliedTxId = storage . getMostRecentCheckpointTxId ( ) ; int numLoaded = 0 ; FSEditLogLoader loader = new FSEditLogLoader ( namesystem ) ; for ( EditLogInputStream editIn : editStreams ) { FLOG . info ( "Load Image: ... | Load the specified list of edit files into the image . |
32,748 | void saveFSImage ( SaveNamespaceContext context , ImageManager im , boolean forceUncompressed ) throws IOException { long txid = context . getTxId ( ) ; OutputStream os = im . getCheckpointOutputStream ( txid ) ; FSImageFormat . Saver saver = new FSImageFormat . Saver ( context ) ; FSImageCompression compression = FSIm... | Save the contents of the FS image to the file . |
32,749 | public synchronized void saveNamespace ( boolean forUncompressed ) throws IOException { InjectionHandler . processEvent ( InjectionEvent . FSIMAGE_STARTING_SAVE_NAMESPACE ) ; if ( editLog == null ) { throw new IOException ( "editLog must be initialized" ) ; } storage . attemptRestoreRemovedStorage ( ) ; InjectionHandle... | Save the contents of the FS image to a new image file in each of the current storage directories . |
32,750 | boolean confirmFormat ( boolean force , boolean interactive ) throws IOException { List < FormatConfirmable > confirms = Lists . newArrayList ( ) ; for ( StorageDirectory sd : storage . dirIterable ( null ) ) { confirms . add ( sd ) ; } confirms . addAll ( editLog . getFormatConfirmables ( ) ) ; return Storage . confir... | Check whether the storage directories and non - file journals exist . If running in interactive mode will prompt the user for each directory to allow them to format anyway . Otherwise returns false unless force is specified . |
32,751 | void rollFSImage ( CheckpointSignature sig ) throws IOException { long start = System . nanoTime ( ) ; sig . validateStorageInfo ( this . storage ) ; saveDigestAndRenameCheckpointImage ( sig . mostRecentCheckpointTxId , sig . imageDigest ) ; long rollTime = DFSUtil . getElapsedTimeMicroSeconds ( start ) ; if ( metrics ... | End checkpoint . Validate the current storage info with the given signature . |
32,752 | synchronized void saveDigestAndRenameCheckpointImage ( long txid , MD5Hash digest ) throws IOException { if ( ! digest . equals ( storage . getCheckpointImageDigest ( txid ) ) ) { throw new IOException ( "Checkpoint image is corrupt: expecting an MD5 checksum of" + digest + " but is " + storage . getCheckpointImageDige... | This is called by the 2NN after having downloaded an image and by the NN after having received a new image from the 2NN . It renames the image from fsimage_N . ckpt to fsimage_N and also saves the related . md5 file into place . |
32,753 | static Collection < File > getCheckpointDirs ( Configuration conf , String defaultName ) { Collection < String > dirNames = conf . getStringCollection ( "fs.checkpoint.dir" ) ; if ( dirNames . size ( ) == 0 && defaultName != null ) { dirNames . add ( defaultName ) ; } Collection < File > dirs = new ArrayList < File > (... | Retrieve checkpoint dirs from configuration . |
32,754 | private void printJobs ( ArrayList < JobInProgress > jobsToInitialize ) { for ( JobInProgress job : jobsToInitialize ) { LOG . info ( "Passing to Initializer Job Id :" + job . getJobID ( ) + " User: " + job . getProfile ( ) . getUser ( ) + " Queue : " + job . getProfile ( ) . getQueueName ( ) ) ; } } | Method used to print log statements about which jobs are being passed to init - threads . |
32,755 | private void assignThreadsToQueues ( ) { int countOfQueues = jobQueues . size ( ) ; String [ ] queues = ( String [ ] ) jobQueues . keySet ( ) . toArray ( new String [ countOfQueues ] ) ; int numberOfQueuesPerThread = countOfQueues / poolSize ; int numberOfQueuesAssigned = 0 ; for ( int i = 0 ; i < poolSize ; i ++ ) { J... | Method which is used by the poller to assign appropriate worker thread to a queue . The number of threads would be always less than or equal to number of queues in a system . If number of threads is configured to be more than number of queues then poller does not create threads more than number of queues . |
32,756 | void cleanUpInitializedJobsList ( ) { Iterator < Entry < JobID , JobInProgress > > jobsIterator = initializedJobs . entrySet ( ) . iterator ( ) ; while ( jobsIterator . hasNext ( ) ) { Entry < JobID , JobInProgress > entry = jobsIterator . next ( ) ; JobInProgress job = entry . getValue ( ) ; if ( job . getStatus ( ) .... | Method which is used internally to clean up the initialized jobs data structure which the job initialization poller uses to check if a job is initialized or not . |
32,757 | private boolean isScheduled ( JobInProgress job ) { return ( ( job . pendingMaps ( ) < job . desiredMaps ( ) ) || ( job . pendingReduces ( ) < job . desiredReduces ( ) ) ) ; } | Convenience method to check if job has been scheduled or not . |
32,758 | void readHeader ( ) throws IOException { int version = in . readInt ( ) ; if ( version != BlockCrcInfoWritable . LATEST_BLOCK_CRC_FILE_VERSION ) { throw new IOException ( "Version " + version + " is not supported." ) ; } numBuckets = in . readInt ( ) ; currentBucket = - 1 ; numRecordsReadInBucket = 0 ; numRecordsInBuck... | Read header of the file |
32,759 | int moveToNextRecordAndGetItsBucketId ( ) throws IOException { while ( numRecordsReadInBucket >= numRecordsInBucket ) { if ( currentBucket + 1 >= numBuckets ) { return - 1 ; } else { numRecordsInBucket = in . readInt ( ) ; currentBucket ++ ; numRecordsReadInBucket = 0 ; } } return currentBucket ; } | Find the bucket ID for the next record . If current bucket hasn t yet been finished then the current bucket ID will be returned . Otherwise it will keep reading the input file until it finds the next non - empty bucket and return this bucket s ID . |
32,760 | BlockCrcInfoWritable getNextRecord ( ) throws IOException { if ( moveToNextRecordAndGetItsBucketId ( ) == - 1 ) { return null ; } BlockCrcInfoWritable crcInfo = new BlockCrcInfoWritable ( ) ; crcInfo . readFields ( in ) ; numRecordsReadInBucket ++ ; return crcInfo ; } | Get information for the next blockCRC record . NULL if not more left . |
32,761 | public static Job startOneJob ( Worker newWorker , Priority pri , Set < String > jobFiles , long detectTime , AtomicLong numFilesSubmitted , AtomicLong lastCheckingTime , long maxPendingJobs ) throws IOException , InterruptedException , ClassNotFoundException { if ( lastCheckingTime != null ) { lastCheckingTime . set (... | Return true if it succeeds to start one job |
32,762 | private Map < Integer , Integer > getLostStripes ( Configuration conf , FileStatus stat , FileSystem fs ) throws IOException { Map < Integer , Integer > lostStripes = new HashMap < Integer , Integer > ( ) ; RaidInfo raidInfo = RaidUtils . getFileRaidInfo ( stat , conf ) ; if ( raidInfo . codec == null ) { return lostSt... | Get the lost blocks numbers per stripe in the source file . |
32,763 | protected Map < String , Integer > getLostFiles ( Pattern pattern , String [ ] dfsckArgs ) throws IOException { Map < String , Integer > lostFiles = new HashMap < String , Integer > ( ) ; BufferedReader reader = getLostFileReader ( dfsckArgs ) ; String line = reader . readLine ( ) ; while ( ( line = reader . readLine (... | Gets a list of lost files from the name node via DFSck |
32,764 | public BlockIntegrityMonitor . Status getAggregateStatus ( ) { Status fixer = corruptionWorker . getStatus ( ) ; Status copier = decommissioningWorker . getStatus ( ) ; List < JobStatus > jobs = new ArrayList < JobStatus > ( ) ; List < JobStatus > simFailedJobs = new ArrayList < JobStatus > ( ) ; List < JobStatus > fai... | Get the status of the entire block integrity monitor . The status returned represents the aggregation of the statuses of all the integrity monitor s components . |
32,765 | public final T makeCall ( ) throws IOException { while ( true ) { try { return call ( ) ; } catch ( ConnectException e ) { try { reconnectToNewJobTracker ( 0 ) ; } catch ( IOException f ) { LOG . error ( "Fallback process failed with " , f ) ; throw e ; } } catch ( IOException e ) { handleIOException ( e ) ; } } } | Template function to make the call . Throws if can not fallback . |
32,766 | private final void reconnectToNewJobTracker ( int connectNum ) throws IOException { if ( connectNum >= CONNECT_MAX_NUMBER ) { LOG . error ( "reconnectToNewJobTracker has reached its max number." ) ; throw new IOException ( "reconnectToNewJobTracker has reached its max number." ) ; } InetSocketAddress secondaryTracker =... | Reconnects to new address obtained from secondary address via InterCoronaTrackerProtocol |
32,767 | public static ResourceCalculatorPlugin getResourceCalculatorPlugin ( Class < ? extends ResourceCalculatorPlugin > clazz , Configuration conf ) { if ( clazz != null ) { return ReflectionUtils . newInstance ( clazz , conf ) ; } try { String osName = System . getProperty ( "os.name" ) ; if ( osName . startsWith ( "Linux" ... | Get the ResourceCalculatorPlugin from the class name and configure it . If class name is null this method will try and return a memory calculator plugin available for this system . |
32,768 | private void performReads ( ReadResult readResult ) throws InterruptedException { long start = System . currentTimeMillis ( ) ; for ( int i = 0 ; i < streams . length ; ) { boolean acquired = slots . tryAcquire ( 1 , 10 , TimeUnit . SECONDS ) ; reporter . progress ( ) ; if ( acquired ) { readPool . execute ( new ReadOp... | Performs a batch of reads from the given streams and waits for the reads to finish . |
32,769 | private void logImbalancedNodes ( ) { if ( LOG . isInfoEnabled ( ) ) { int underUtilized = 0 , overUtilized = 0 ; for ( BalancerDatanode node : this . datanodes . values ( ) ) { if ( isUnderUtilized ( node ) ) underUtilized ++ ; else if ( isOverUtilized ( node ) ) overUtilized ++ ; } StringBuilder msg = new StringBuild... | Log the over utilized & under utilized nodes |
32,770 | private void logPlanOutcome ( ) { if ( LOG . isInfoEnabled ( ) ) { LOG . info ( "Predicted plan outcome: bytesLeftToMove: " + bytesLeftToMove + ", bytesToMove: " + bytesToMove ) ; for ( BalancerDatanode node : this . datanodes . values ( ) ) { LOG . info ( node . getName ( ) + " remaining: " + node . getCurrentRemainin... | Log node utilization after the plan execution |
32,771 | private void scheduleTask ( Source source , long size , Target target ) { NodeTask nodeTask = new NodeTask ( target , size ) ; source . addNodeTask ( nodeTask ) ; target . addNodeTask ( nodeTask ) ; sources . add ( source ) ; targets . add ( target ) ; LOG . info ( "scheduled " + size + " bytes : " + source . getName (... | Pairs up given nodes in balancing plan |
32,772 | public static void logDataDistribution ( DatanodeInfo [ ] report ) { if ( LOG . isInfoEnabled ( ) ) { double avgRemaining = computeAvgRemaining ( Arrays . asList ( report ) ) ; StringBuilder msg = new StringBuilder ( "Data distribution report: avgRemaining " + avgRemaining ) ; for ( DatanodeInfo node : report ) { msg .... | Prints data distribution based on report from NameNode |
32,773 | public void setJarByClass ( Class cls ) { String jar = findContainingJar ( cls ) ; if ( jar != null ) { setJar ( jar ) ; } } | Set the job s jar file by finding an example class location . |
32,774 | public void deleteLocalFiles ( ) throws IOException { String [ ] localDirs = getLocalDirs ( ) ; for ( int i = 0 ; i < localDirs . length ; i ++ ) { FileSystem . getLocal ( this ) . delete ( new Path ( localDirs [ i ] ) ) ; } } | Use MRAsyncDiskService . moveAndDeleteAllVolumes instead . |
32,775 | public Path getWorkingDirectory ( ) { String name = get ( "mapred.working.dir" ) ; if ( name != null ) { return new Path ( name ) ; } else { try { Path dir = FileSystem . get ( this ) . getWorkingDirectory ( ) ; set ( "mapred.working.dir" , dir . toString ( ) ) ; return dir ; } catch ( IOException e ) { throw new Runti... | Get the current working directory for the default file system . |
32,776 | public long getMemoryForMapTask ( ) { long value = getDeprecatedMemoryValue ( ) ; if ( value == DISABLED_MEMORY_LIMIT ) { value = normalizeMemoryConfigValue ( getLong ( JobConf . MAPRED_JOB_MAP_MEMORY_MB_PROPERTY , DISABLED_MEMORY_LIMIT ) ) ; } return value ; } | Get memory required to run a map task of the job in MB . |
32,777 | public long getMemoryForReduceTask ( ) { long value = getDeprecatedMemoryValue ( ) ; if ( value == DISABLED_MEMORY_LIMIT ) { value = normalizeMemoryConfigValue ( getLong ( JobConf . MAPRED_JOB_REDUCE_MEMORY_MB_PROPERTY , DISABLED_MEMORY_LIMIT ) ) ; } return value ; } | Get memory required to run a reduce task of the job in MB . |
32,778 | int computeNumSlotsPerMap ( long slotSizePerMap ) { if ( ( slotSizePerMap == DISABLED_MEMORY_LIMIT ) || ( getMemoryForMapTask ( ) == DISABLED_MEMORY_LIMIT ) ) { return 1 ; } return ( int ) ( Math . ceil ( ( float ) getMemoryForMapTask ( ) / ( float ) slotSizePerMap ) ) ; } | Compute the number of slots required to run a single map task - attempt of this job . |
32,779 | int computeNumSlotsPerReduce ( long slotSizePerReduce ) { if ( ( slotSizePerReduce == DISABLED_MEMORY_LIMIT ) || ( getMemoryForReduceTask ( ) == DISABLED_MEMORY_LIMIT ) ) { return 1 ; } return ( int ) ( Math . ceil ( ( float ) getMemoryForReduceTask ( ) / ( float ) slotSizePerReduce ) ) ; } | Compute the number of slots required to run a single reduce task - attempt of this job . |
32,780 | private static String findContainingJar ( Class my_class ) { ClassLoader loader = my_class . getClassLoader ( ) ; String class_file = my_class . getName ( ) . replaceAll ( "\\." , "/" ) + ".class" ; try { for ( Enumeration itr = loader . getResources ( class_file ) ; itr . hasMoreElements ( ) ; ) { URL url = ( URL ) it... | Find a jar that contains a class of the same name if any . It will return a jar file even if that is not the first thing on the class path that has a class with the same name . |
32,781 | public static void overrideConfiguration ( JobConf conf , int instance ) { final String CONFIG_KEYS [ ] = new String [ ] { "mapred.job.tracker" , "mapred.local.dir" , "mapred.fairscheduler.server.address" } ; for ( String configKey : CONFIG_KEYS ) { String value = conf . get ( configKey + "-" + instance ) ; if ( value ... | Replce the jobtracker configuration with the configuration of 0 or 1 instance . This allows switching two sets of configurations in the command line option . |
32,782 | private static String constructMessage ( String property , String newVal , String oldVal ) { String message = "Could not change property " + property ; if ( oldVal != null ) { message += " from \'" + oldVal ; } if ( newVal != null ) { message += "\' to \'" + newVal + "\'" ; } return message ; } | Construct the exception message . |
32,783 | void recoverTransitionRead ( DataNode datanode , NamespaceInfo nsInfo , Collection < File > dataDirs , StartupOption startOpt ) throws IOException { assert FSConstants . LAYOUT_VERSION == nsInfo . getLayoutVersion ( ) : "Block-pool and name-node layout versions must be the same." ; this . storageDirs = new ArrayList < ... | Analyze storage directories . Recover from previous transitions if required . |
32,784 | protected void setFields ( Properties props , StorageDirectory sd ) throws IOException { props . setProperty ( NAMESPACE_ID , String . valueOf ( namespaceID ) ) ; props . setProperty ( CHECK_TIME , String . valueOf ( cTime ) ) ; props . setProperty ( LAYOUT_VERSION , String . valueOf ( layoutVersion ) ) ; } | Set layoutVersion namespaceID and blockpoolID into namespace storage VERSION file |
32,785 | private void setNameSpaceID ( File storage , String nsid ) throws InconsistentFSStateException { if ( nsid == null || nsid . equals ( "" ) ) { throw new InconsistentFSStateException ( storage , "file " + STORAGE_FILE_VERSION + " is invalid." ) ; } int newNsId = Integer . parseInt ( nsid ) ; if ( namespaceID > 0 && name... | Validate and set namespace ID |
32,786 | private void doUpgrade ( List < StorageDirectory > sds , List < StorageInfo > sdsInfo , final NamespaceInfo nsInfo ) throws IOException { assert sds . size ( ) == sdsInfo . size ( ) ; UpgradeThread [ ] upgradeThreads = new UpgradeThread [ sds . size ( ) ] ; for ( int i = 0 ; i < upgradeThreads . length ; i ++ ) { final... | Move current storage into a backup directory and hardlink all its blocks into the new current directory . |
32,787 | public void readFields ( DataInput in ) throws IOException { byte version = in . readByte ( ) ; if ( version != getVersion ( ) ) throw new VersionMismatchException ( getVersion ( ) , version ) ; } | javadoc from Writable |
32,788 | private void failTasksWithMaxMemory ( long memoryToRelease ) { List < TaskAttemptID > allTasks = new ArrayList < TaskAttemptID > ( ) ; allTasks . addAll ( processTreeInfoMap . keySet ( ) ) ; Collections . sort ( allTasks , new Comparator < TaskAttemptID > ( ) { public int compare ( TaskAttemptID tid1 , TaskAttemptID ti... | Starting from the tasks use the highest amount of memory fail the tasks until the memory released meets the requirement |
32,789 | private void killTask ( TaskAttemptID tid , String msg , boolean wasFailure ) { taskTracker . cleanUpOverMemoryTask ( tid , wasFailure , msg ) ; CGroupProcessTreeInfo ptInfo = processTreeInfoMap . get ( tid ) ; try { LinuxSystemCall . killProcessGroup ( Integer . parseInt ( ptInfo . getPID ( ) ) ) ; } catch ( java . io... | Kill the task and clean up CGroupProcessTreeInfo |
32,790 | private boolean isKillable ( TaskAttemptID tid ) { TaskInProgress tip = taskTracker . runningTasks . get ( tid ) ; return tip != null && ! tip . wasKilled ( ) && ( tip . getRunState ( ) == TaskStatus . State . RUNNING || tip . getRunState ( ) == TaskStatus . State . COMMIT_PENDING ) ; } | Check if a task can be killed to increase free memory |
32,791 | public String getResponse ( String [ ] argv ) throws IOException { String result = "" ; if ( argv . length < 1 ) { return result ; } if ( argv [ 0 ] . equals ( "-all" ) ) { result += rpcCollector . getClusterUtilization ( ) ; result += JobUtilization . legendString + JobUtilization . unitString ; for ( JobUtilization j... | Obtain the result to print in command line |
32,792 | void splitKeyVal ( byte [ ] line , Text key , Text val ) throws IOException { int pos = UTF8ByteArrayUtils . findNthByte ( line , ( byte ) this . getFieldSeparator ( ) , this . getNumOfKeyFields ( ) ) ; try { if ( pos == - 1 ) { key . set ( line ) ; val . set ( "" ) ; } else { UTF8ByteArrayUtils . splitKeyVal ( line , ... | Split a line into key and value . Assume the delimitor is a tab . |
32,793 | void write ( Writable value ) throws IOException { byte [ ] bval ; int valSize ; if ( value instanceof BytesWritable ) { BytesWritable val = ( BytesWritable ) value ; bval = val . get ( ) ; valSize = val . getSize ( ) ; } else if ( value instanceof Text ) { Text val = ( Text ) value ; bval = val . getBytes ( ) ; valSiz... | Write a writable value to the output stream using UTF - 8 encoding |
32,794 | private void cleanUpHistory ( ) { long oldestAllowedTimestamp = System . currentTimeMillis ( ) - historyLength ; int trashedNotifications = 0 ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "History cleanup: Checking old notifications to remove from history list ..." ) ; } HistoryTreeEntry key = new HistoryTreeEntry ... | Checks if there are notifications in our tree which are older than historyLength . It removes does which are older . |
32,795 | private void cleanUpHistoryTree ( HistoryNode node ) { if ( node == null || node . children == null ) { return ; } Iterator < HistoryNode > iterator = node . children . iterator ( ) ; while ( iterator . hasNext ( ) ) { HistoryNode child = iterator . next ( ) ; cleanUpHistoryTree ( child ) ; if ( shouldRemoveNode ( chil... | Clean up the Tree by DFS traversal . |
32,796 | private boolean shouldRemoveNode ( HistoryNode node ) { if ( node == null ) { return true ; } int sizeOfChildren = 0 ; if ( node . children != null ) { sizeOfChildren = node . children . size ( ) ; } if ( sizeOfChildren > 0 ) { return false ; } int sizeOfNotifications = 0 ; if ( node . notifications != null ) { for ( L... | Should remove the node from the history tree if both the notifications and children list are empty . |
32,797 | public void storeNotification ( NamespaceNotification notification ) { int notificationsCount = 0 ; historyLock . writeLock ( ) . lock ( ) ; try { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Storing into history: " + NotifierUtils . asString ( notification ) ) ; } String [ ] paths = DFSUtil . split ( notification ... | Called when we should store a notification in the our history . The timestamp used to store it is generated when this method is called . |
32,798 | public void addNotificationsToQueue ( NamespaceEvent event , long txId , Queue < NamespaceNotification > notifications ) throws TransactionIdTooOldException { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Got addNotificationsToQueue for: " + NotifierUtils . asString ( event ) + " and txId: " + txId ) ; } historyLock... | Checks what notifications are saved in history for the given event and adds those notifications in the given queue . Only the notifications which happened strictly after the edit log operations with the given transaction id are put in the queue . The notifications are put in the queue in the order of their transaction ... |
32,799 | private void printProgress ( long read , long size ) { int progress = Math . min ( 100 , ( int ) ( ( 100 * read ) / size ) ) ; if ( progress > lastProgress ) { lastProgress = progress ; System . out . println ( "Completed " + lastProgress + " % " ) ; } } | Print the progress . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.