idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
31,800 | public void registerUpdater ( Scheduler scheduler , SessionNotifier sessionNotifier ) { this . scheduler = scheduler ; this . sessionNotifier = sessionNotifier ; context . registerUpdater ( this ) ; } | Set the scheduler and start the updating . The metrics won't be reported until this is called . |
31,801 | private Map < SessionStatus , MetricsTimeVaryingInt > createSessionStatusToMetricsMap ( ) { Map < SessionStatus , MetricsTimeVaryingInt > m = new HashMap < SessionStatus , MetricsTimeVaryingInt > ( ) ; for ( SessionStatus endState : SESSION_END_STATES ) { String name = endState . toString ( ) . toLowerCase ( ) + "_sess... | Create a map of session status - > metrics . |
31,802 | private Map < ResourceType , MetricsIntValue > createTypeToCountMap ( Collection < ResourceType > resourceTypes , String actionType ) { Map < ResourceType , MetricsIntValue > m = new HashMap < ResourceType , MetricsIntValue > ( ) ; for ( ResourceType t : resourceTypes ) { String name = ( actionType + "_" + t ) . toLowe... | Create a map of resource type - > current count . |
31,803 | private Map < ResourceType , MetricsTimeVaryingLong > createTypeToResourceCountMap ( Collection < ResourceType > resourceTypes , String actionType ) { Map < ResourceType , MetricsTimeVaryingLong > m = new HashMap < ResourceType , MetricsTimeVaryingLong > ( ) ; for ( ResourceType t : resourceTypes ) { String name = ( ac... | Create a map of resource type - > cumulative counts . |
31,804 | private long makeUpRuntime ( List < LoggedDiscreteCDF > mapAttemptCDFs ) { int total = 0 ; for ( LoggedDiscreteCDF cdf : mapAttemptCDFs ) { total += cdf . getNumberValues ( ) ; } if ( total == 0 ) { return - 1 ; } int index = random . nextInt ( total ) ; for ( LoggedDiscreteCDF cdf : mapAttemptCDFs ) { if ( index >= cd... | Perform a weighted random selection on a list of CDFs and produce a random variable using the selected CDF . |
31,805 | public void execute ( ) throws BuildException { if ( src == null && filesets . size ( ) == 0 ) { throw new BuildException ( "There must be a file attribute or a fileset child element" ) ; } if ( src != null ) { doCompile ( src ) ; } Project myProject = getProject ( ) ; for ( int i = 0 ; i < filesets . size ( ) ; i ++ )... | Invoke the Hadoop record compiler on each record definition file |
31,806 | public int compareTo ( Object o ) { long thisValue = this . value ; long thatValue = ( ( VLongWritable ) o ) . value ; return ( thisValue < thatValue ? - 1 : ( thisValue == thatValue ? 0 : 1 ) ) ; } | Compares two VLongWritables . |
31,807 | private static void createInputDirectory ( FileSystem fs , Path dir , Pentomino pent , int depth ) throws IOException { fs . mkdirs ( dir ) ; List < int [ ] > splits = pent . getSplits ( depth ) ; PrintStream file = new PrintStream ( new BufferedOutputStream ( fs . create ( new Path ( dir , "part1" ) ) , 64 * 1024 ) ) ... | Create the input file with all of the possible combinations of the given depth . |
31,808 | public static InetSocketAddress getAddress ( JobConf conf , String key ) { String str = conf . get ( key ) ; if ( str == null ) return null ; String hostPortPair [ ] = str . split ( ":" ) ; if ( hostPortPair . length != 2 ) return null ; return new InetSocketAddress ( hostPortPair [ 0 ] , Integer . parseInt ( hostPortP... | Deserialize InetSocketAddress from String from given key in conf |
31,809 | public static void setAddress ( JobConf conf , String key , InetSocketAddress address ) { if ( address == null ) { conf . unset ( key ) ; return ; } String addrStr = address . getHostName ( ) + ":" + address . getPort ( ) ; conf . set ( key , addrStr ) ; } | Serialize InetSocketAddress to String and saves in given conf |
31,810 | private void snapshotConfig ( ) { maximum = configManager . getPoolGroupMaximum ( getName ( ) , getType ( ) ) ; minimum = configManager . getPoolGroupMinimum ( getName ( ) , getType ( ) ) ; } | Get the snapshot of the configuration from the configuration manager |
31,811 | public Queue < PoolSchedulable > getScheduleQueue ( ) { if ( scheduleQueue == null ) { ScheduleComparator sc = configManager . getPoolGroupComparator ( getName ( ) ) ; scheduleQueue = createPoolQueue ( sc ) ; } return scheduleQueue ; } | Get the queue of pools sorted for scheduling |
31,812 | public Queue < PoolSchedulable > getPreemptQueue ( ) { ScheduleComparator sPreempt = null ; if ( preemptQueue == null ) { ScheduleComparator sc = configManager . getPoolGroupComparator ( getName ( ) ) ; if ( sc == ScheduleComparator . PRIORITY ) { sPreempt = ScheduleComparator . PRIORITY_PREEMPT ; } else { throw new Il... | Get the queue of the pool sorted for preemption |
31,813 | private Queue < PoolSchedulable > createPoolQueue ( ScheduleComparator comparator ) { int initCapacity = snapshotPools . size ( ) == 0 ? 1 : snapshotPools . size ( ) ; Queue < PoolSchedulable > poolQueue = new PriorityQueue < PoolSchedulable > ( initCapacity , comparator ) ; poolQueue . addAll ( snapshotPools ) ; retur... | Put all the pools into the priority queue sorted by a comparator |
31,814 | public PoolSchedulable getPool ( PoolInfo poolInfo ) { PoolSchedulable pool = nameToMap . get ( poolInfo ) ; if ( pool == null ) { pool = new PoolSchedulable ( poolInfo , getType ( ) , configManager ) ; PoolSchedulable prevPool = nameToMap . putIfAbsent ( poolInfo , pool ) ; if ( prevPool != null ) { pool = prevPool ; ... | Get a pool creating it if it does not exist . Note that these pools are never removed . |
31,815 | @ SuppressWarnings ( "unchecked" ) public Class < ? extends Reducer < ? , ? , ? , ? > > getCombinerClass ( ) throws ClassNotFoundException { return ( Class < ? extends Reducer < ? , ? , ? , ? > > ) conf . getClass ( COMBINE_CLASS_ATTR , null ) ; } | Get the combiner class for the job . |
31,816 | public int compareTo ( Object o ) { long thisValue = this . value ; long thatValue = ( ( LongWritable ) o ) . value ; return ( thisValue < thatValue ? - 1 : ( thisValue == thatValue ? 0 : 1 ) ) ; } | Compares two LongWritables . |
31,817 | public static void processButtons ( HttpServletRequest request , HttpServletResponse response , JobTracker tracker ) throws IOException { if ( conf . getBoolean ( PRIVATE_ACTIONS_KEY , false ) && request . getParameter ( "killJobs" ) != null ) { String [ ] jobs = request . getParameterValues ( "jobCheckBox" ) ; if ( jo... | Method used to process the request from the job page based on the request which it has received . For example like changing priority . |
31,818 | public static String getJobDetailsHistoryLink ( JobTracker tracker , String jobId ) { RetireJobInfo info = tracker . retireJobs . get ( JobID . forName ( jobId ) ) ; String historyFileUrl = getHistoryFileUrl ( info ) ; String result = ( historyFileUrl == null ? "" : "jobdetailshistory.jsp?jobid=" + jobId + "&logFile=" ... | Given jobId resolve the link to jobdetailshistory . jsp |
31,819 | private static String getHistoryFileUrl ( RetireJobInfo info ) { String historyFile = info . getHistoryFile ( ) ; String historyFileUrl = null ; if ( historyFile != null && ! historyFile . equals ( "" ) ) { try { historyFileUrl = URLEncoder . encode ( info . getHistoryFile ( ) , "UTF-8" ) ; } catch ( UnsupportedEncodin... | Obtain history file URL from RetireJobInfo |
31,820 | public static String generateClusterResTable ( JobTracker tracker ) throws IOException { ResourceReporter reporter = tracker . getResourceReporter ( ) ; if ( reporter == null ) { return "" ; } StringBuffer sb = new StringBuffer ( ) ; sb . append ( "<table border=\"1\" cellpadding=\"5\" cellspacing=\"0\">\n" ) ; sb . ap... | Method used to generate the cluster resource utilization table |
31,821 | private void setupInputStream ( ) throws IOException { HttpURLConnection connection = ( HttpURLConnection ) url . openConnection ( ) ; connection . setConnectTimeout ( httpTimeout ) ; connection . setReadTimeout ( httpTimeout ) ; if ( connection . getResponseCode ( ) != HttpURLConnection . HTTP_OK ) { throw new IOExcep... | Get input stream for the image through http connection . |
31,822 | public Map < Integer , Map < ResourceType , Integer > > getCpuToResourcePartitioning ( ) { if ( cachedCpuToResourcePartitioning == null ) { cachedCpuToResourcePartitioning = getUncachedCpuToResourcePartitioning ( this ) ; } return cachedCpuToResourcePartitioning ; } | Get and cache the cpu to resource partitioning for this object . |
31,823 | public PoolInfo getPoolInfo ( ) { String poolNameProperty = get ( IMPLICIT_POOL_PROPERTY , "user.name" ) ; String explicitPool = get ( EXPLICIT_POOL_PROPERTY , get ( poolNameProperty , "" ) ) . trim ( ) ; String [ ] poolInfoSplitString = explicitPool . split ( "[.]" ) ; if ( poolInfoSplitString != null && poolInfoSplit... | Get the pool info . In order to support previous behavior a single pool name is accepted . |
31,824 | public boolean readMetaDataIfNeeded ( ) throws IOException { if ( eofReached ) { return false ; } if ( rawBlockOffset == 0 ) { try { metaDataConsumer . readMetaData ( in , metaDataBlockSize ) ; rawBlockOffset += metaDataBlockSize ; } catch ( EOFException e ) { eofReached = true ; return false ; } } return true ; } | Returns whether we've reached EOF . |
31,825 | private boolean seekOrSkip ( long bytes , boolean toNewSource ) throws IOException { if ( seekableIn != null ) { int available = in . available ( ) ; try { if ( toNewSource ) { return seekableIn . seekToNewSource ( seekableIn . getPos ( ) + bytes ) ; } else { seekableIn . seek ( seekableIn . getPos ( ) + bytes ) ; retu... | This function depends on the underlying |
31,826 | protected boolean rawSkip ( long bytes , boolean toNewSource ) throws IOException { boolean result = seekOrSkip ( bytes , toNewSource ) ; setRawOffset ( getRawOffset ( ) + bytes ) ; if ( rawBlockOffset > 0 && rawBlockOffset < metaDataBlockSize ) { throw new IOException ( "Cannot jump into the middle of a MetaDataBlock.... | Skip some bytes from the raw InputStream . |
31,827 | public static void prepare ( String fname ) { if ( ! "Linux" . equalsIgnoreCase ( System . getProperty ( "os.name" ) ) ) { System . err . println ( "Linux system required for FailMon. Exiting..." ) ; System . exit ( 0 ) ; } System . setProperty ( "log4j.configuration" , "conf/log4j.properties" ) ; PropertyConfigurator ... | Initializes structures needed by other methods . Also determines whether the executing user has superuser privileges . |
31,828 | public static int getInterval ( ArrayList < MonitorJob > monitors ) { String tmp = getProperty ( "executor.interval.min" ) ; if ( tmp != null ) MIN_INTERVAL = Integer . parseInt ( tmp ) ; int [ ] monIntervals = new int [ monitors . size ( ) ] ; for ( int i = 0 ; i < monitors . size ( ) ; i ++ ) monIntervals [ i ] = mon... | Determines the minimum interval at which the executor thread needs to wake up to execute jobs . Essentially this interval equals the GCD of intervals of all scheduled jobs . |
31,829 | public static boolean checkExistence ( String cmd ) { StringBuffer sb = runCommand ( "which " + cmd ) ; if ( sb . length ( ) > 1 ) return true ; return false ; } | Checks whether a specific shell command is available in the system . |
31,830 | public static StringBuffer runCommand ( String [ ] cmd ) { StringBuffer retval = new StringBuffer ( MAX_OUTPUT_LENGTH ) ; Process p ; try { p = Runtime . getRuntime ( ) . exec ( cmd ) ; InputStream tmp = p . getInputStream ( ) ; p . waitFor ( ) ; int c ; while ( ( c = tmp . read ( ) ) != - 1 ) retval . append ( ( char ... | Runs a shell command in the system and provides a StringBuffer with the output of the command . |
31,831 | protected URI createUri ( FileStatus i , UnixUserGroupInformation ugi , ClientProtocol nnproxy , HttpServletRequest request ) throws IOException , URISyntaxException { return createUri ( i . getPath ( ) . toString ( ) , pickSrcDatanode ( i , nnproxy ) , ugi , request ) ; } | Create a redirection URI |
31,832 | private static DatanodeInfo [ ] pickSrcDatanode ( FileStatus i , ClientProtocol nnproxy ) throws IOException { if ( jspHelper == null ) jspHelper = new JspHelper ( ) ; final LocatedBlocks blks = nnproxy . getBlockLocations ( i . getPath ( ) . toUri ( ) . getPath ( ) , 0 , 1 ) ; if ( i . getLen ( ) == 0 || blks . getLoc... | Select a datanode to service this request which is the first one in the returned array . The rest of the elements in the datanode are possible candidates if the first one fails . Currently this looks at no more than the first five blocks of a file selecting a datanode randomly from the most represented . |
31,833 | public static void skip ( DataInput in ) throws IOException { int length = in . readUnsignedShort ( ) ; WritableUtils . skipFully ( in , length ) ; } | Skips over one UTF8 in the input . |
31,834 | public int compareTo ( Object o ) { UTF8 that = ( UTF8 ) o ; return WritableComparator . compareBytes ( bytes , 0 , length , that . bytes , 0 , that . length ) ; } | Compare two UTF8s . |
31,835 | public static byte [ ] getBytes ( String string ) { byte [ ] result = new byte [ utf8Length ( string ) ] ; try { writeChars ( result , string , 0 , string . length ( ) ) ; } catch ( IOException e ) { throw new RuntimeException ( e ) ; } return result ; } | Convert a string to a UTF - 8 encoded byte array . |
31,836 | public static String readString ( DataInput in ) throws IOException { int bytes = in . readUnsignedShort ( ) ; return readChars ( in , bytes ) ; } | Read a UTF - 8 encoded string . |
31,837 | public static int writeString ( DataOutput out , String s ) throws IOException { if ( s . length ( ) > 0xffff / 3 ) { LOG . warn ( "truncating long string: " + s . length ( ) + " chars, starting with " + s . substring ( 0 , 20 ) ) ; s = s . substring ( 0 , 0xffff / 3 ) ; } int len = utf8Length ( s ) ; if ( len > 0xffff... | Write a UTF - 8 encoded string . |
31,838 | private static int utf8Length ( String string ) { int stringLength = string . length ( ) ; int utf8Length = 0 ; for ( int i = 0 ; i < stringLength ; i ++ ) { int c = string . charAt ( i ) ; if ( c <= 0x007F ) { utf8Length ++ ; } else if ( c > 0x07FF ) { utf8Length += 3 ; } else { utf8Length += 2 ; } } return utf8Length... | Returns the number of bytes required to write this . |
31,839 | public void create ( ) throws IOException { fc . truncate ( 0 ) ; fc . position ( 0 ) ; doubleBuf . getCurrentBuf ( ) . writeInt ( FSConstants . LAYOUT_VERSION ) ; setReadyToFlush ( ) ; flush ( ) ; } | Create empty edits logs file . |
31,840 | public void setReadyToFlush ( ) throws IOException { doubleBuf . getCurrentBuf ( ) . write ( FSEditLogOpCodes . OP_INVALID . getOpCode ( ) ) ; doubleBuf . setReadyToFlush ( ) ; } | All data that has been written to the stream so far will be flushed . New data can be still written to the stream while flushing is performed . |
31,841 | protected void flushAndSync ( boolean durable ) throws IOException { if ( fp == null ) { throw new IOException ( "Trying to use aborted output stream" ) ; } preallocate ( ) ; if ( doubleBuf . isFlushed ( ) ) { return ; } doubleBuf . flushTo ( fp ) ; if ( durable ) { fc . force ( false ) ; } fc . position ( fc . positio... | Flush ready buffer to persistent store . currentBuffer is not flushed as it accumulates new log records while readyBuffer will be flushed and synced . |
31,842 | private void preallocate ( ) throws IOException { long position = fc . position ( ) ; long triggerSize = Math . max ( FSEditLog . preallocateSize / 100 , 4096 ) ; if ( position + triggerSize >= fc . size ( ) ) { if ( FSNamesystem . LOG . isDebugEnabled ( ) ) { FSNamesystem . LOG . debug ( "Preallocating Edit log, curre... | allocate a big chunk of data |
31,843 | private HashMap < DatanodeDescriptor , Integer > directoryDataNodeUsage ( INodeDirectory dir , int threshold ) { HashMap < DatanodeDescriptor , Integer > dataNodeUsage = new HashMap < DatanodeDescriptor , Integer > ( ) ; List < INode > children ; nameSystem . readLock ( ) ; try { if ( dir . getChildrenRaw ( ) == null )... | Iterates through files in the directory dir and counts the number of usages of datanodes for the files in the directory dir . Just iterates through threshold number of blocks . |
31,844 | protected static void checkSource ( Configuration conf , List < Path > srcs ) throws InvalidInputException { List < IOException > ioes = new ArrayList < IOException > ( ) ; for ( Path p : srcs ) { try { if ( ! p . getFileSystem ( conf ) . exists ( p ) ) { ioes . add ( new FileNotFoundException ( "Source " + p + " does ... | Sanity check for source |
31,845 | protected void connect ( ) { LOG . info ( "Connecting to collector..." ) ; try { conf . setStrings ( UnixUserGroupInformation . UGI_PROPERTY_NAME , new String [ ] { "hadoop" , "hadoop" } ) ; rpcCollector = ( UtilizationCollectorProtocol ) RPC . getProxy ( UtilizationCollectorProtocol . class , UtilizationCollectorProto... | Make connection to the Collector |
31,846 | protected void fetchData ( ) throws IOException { if ( rpcCollector == null ) { try { Thread . sleep ( RECONNECT_PERIOD ) ; } catch ( InterruptedException e ) { } connect ( ) ; } try { clusterUtil = rpcCollector . getClusterUtilization ( ) ; for ( JobUtilization job : rpcCollector . getAllRunningJobUtilization ( ) ) { ... | Mirror data from the Collector |
31,847 | public float getCapacity ( String queue ) { String raw = rmConf . getRaw ( toFullPropertyName ( queue , CAPACITY_PROPERTY ) ) ; if ( raw == null ) { return - 1 ; } float result = rmConf . getFloat ( toFullPropertyName ( queue , CAPACITY_PROPERTY ) , - 1 ) ; if ( result < 0.0 || result > 100.0 ) { throw new IllegalArgum... | Get the percentage of the cluster for the specified queue . |
31,848 | public void setCapacity ( String queue , float capacity ) { rmConf . setFloat ( toFullPropertyName ( queue , CAPACITY_PROPERTY ) , capacity ) ; } | Sets the capacity of the given queue . |
31,849 | public float getMaxCapacity ( String queue ) { float result = rmConf . getFloat ( toFullPropertyName ( queue , MAX_CAPACITY_PROPERTY ) , - 1 ) ; result = ( result <= 0 ) ? - 1 : result ; if ( result > 100.0 ) { throw new IllegalArgumentException ( "Illegal " + MAX_CAPACITY_PROPERTY + " for queue " + queue + " of " + re... | Return the maximum percentage of the cluster capacity that can be used by the given queue . This percentage defines a limit beyond which a queue cannot use the capacity of cluster . This provides a means to limit how much excess capacity a queue can use . By default there is no limit . |
31,850 | public void setMaxCapacity ( String queue , float maxCapacity ) { rmConf . setFloat ( toFullPropertyName ( queue , MAX_CAPACITY_PROPERTY ) , maxCapacity ) ; } | Sets the maxCapacity of the given queue . |
31,851 | public int getMinimumUserLimitPercent ( String queue ) { int userLimit = rmConf . getInt ( toFullPropertyName ( queue , "minimum-user-limit-percent" ) , defaultUlimitMinimum ) ; if ( userLimit <= 0 || userLimit > 100 ) { throw new IllegalArgumentException ( "Invalid user limit : " + userLimit + " for queue : " + queue ... | Get the minimum limit of resources for any user submitting jobs in this queue in percentage . |
31,852 | public int getMaxJobsPerUserToInitialize ( String queue ) { int maxJobsPerUser = rmConf . getInt ( toFullPropertyName ( queue , "maximum-initialized-jobs-per-user" ) , defaultMaxJobsPerUsersToInitialize ) ; if ( maxJobsPerUser <= 0 ) { throw new IllegalArgumentException ( "Invalid maximum jobs per user configuration " ... | Gets the maximum number of jobs which are allowed to initialize in the job queue . |
31,853 | boolean addBlock ( BlockInfo b ) { int dnIndex = b . addNode ( this ) ; if ( dnIndex < 0 ) return false ; blockList = b . listInsert ( blockList , this , dnIndex ) ; numOfBlocks ++ ; return true ; } | Add data - node to the block . Add block to the head of the list of blocks belonging to the data - node . |
31,854 | void insertIntoList ( BlockInfo head , int headIndex , BlockInfo tail , int tailIndex , int count ) { if ( head == null ) return ; tail . setNext ( tailIndex , blockList ) ; if ( blockList != null ) blockList . setPrevious ( blockList . findDatanode ( this ) , tail ) ; blockList = head ; blockList . setPrevious ( headI... | Adds blocks already connected into list to this descriptor's blocks . The blocks in the input list already have this descriptor inserted to them . Used for parallel initial block reports . |
31,855 | boolean removeBlock ( BlockInfo b ) { blockList = b . listRemove ( blockList , this ) ; if ( b . removeNode ( this ) ) { numOfBlocks -- ; return true ; } else { return false ; } } | Remove block from the list of blocks belonging to the data - node . Remove data - node from the block . |
31,856 | void moveBlockToHead ( BlockInfo b ) { blockList = b . listRemove ( blockList , this ) ; blockList = b . listInsert ( blockList , this , - 1 ) ; } | Move block to the head of the list of blocks belonging to the data - node . |
31,857 | protected BlockInfo listMoveToHead ( BlockInfo block , BlockInfo head , DatanodeIndex indexes ) { assert head != null : "Head can not be null" ; if ( head == block ) { return head ; } BlockInfo next = block . getSetNext ( indexes . currentIndex , head ) ; BlockInfo prev = block . getSetPrevious ( indexes . currentIndex... | Remove block from the list and insert into the head of the list of blocks related to the specified DatanodeDescriptor . If the head is null then form a new list . |
31,858 | void addBlockToBeReplicated ( Block block , DatanodeDescriptor [ ] targets ) { assert ( block != null && targets != null && targets . length > 0 ) ; replicateBlocks . offer ( block , targets ) ; } | Store block replication work . |
31,859 | void addBlockToBeRecovered ( Block block , DatanodeDescriptor [ ] targets ) { assert ( block != null && targets != null && targets . length > 0 ) ; recoverBlocks . offer ( block , targets ) ; } | Store block recovery work . |
31,860 | void addBlocksToBeInvalidated ( List < Block > blocklist ) { assert ( blocklist != null && blocklist . size ( ) > 0 ) ; synchronized ( invalidateBlocks ) { for ( Block blk : blocklist ) { invalidateBlocks . add ( blk ) ; } } } | Store block invalidation work . |
31,861 | BlockCommand getInvalidateBlocks ( int maxblocks ) { Block [ ] deleteList = null ; synchronized ( invalidateBlocks ) { deleteList = invalidateBlocks . pollToArray ( new Block [ Math . min ( invalidateBlocks . size ( ) , maxblocks ) ] ) ; } return ( deleteList == null || deleteList . length == 0 ) ? null : new BlockComm... | Remove the specified number of blocks to be invalidated |
31,862 | void readFieldsFromFSEditLog ( DataInput in ) throws IOException { this . name = UTF8 . readString ( in ) ; this . storageID = UTF8 . readString ( in ) ; this . infoPort = in . readShort ( ) & 0x0000ffff ; this . capacity = in . readLong ( ) ; this . dfsUsed = in . readLong ( ) ; this . remaining = in . readLong ( ) ; ... | Serialization for FSEditLog |
31,863 | private void rollBlocksScheduled ( long now ) { if ( ( now - lastBlocksScheduledRollTime ) > BLOCKS_SCHEDULED_ROLL_INTERVAL ) { prevApproxBlocksScheduled = currApproxBlocksScheduled ; currApproxBlocksScheduled = 0 ; lastBlocksScheduledRollTime = now ; } } | Adjusts curr and prev number of blocks scheduled every few minutes . |
31,864 | public String getMessage ( ) { StringBuffer result = new StringBuffer ( ) ; Iterator < IOException > itr = problems . iterator ( ) ; while ( itr . hasNext ( ) ) { result . append ( itr . next ( ) . getMessage ( ) ) ; if ( itr . hasNext ( ) ) { result . append ( "\n" ) ; } } return result . toString ( ) ; } | Get a summary message of the problems found . |
31,865 | private boolean newImage ( String name , String filename ) { ImageDescriptor id ; boolean success ; try { URL fileURL = FileLocator . find ( bundle , new Path ( RESOURCE_DIR + filename ) , null ) ; id = ImageDescriptor . createFromURL ( FileLocator . toFileURL ( fileURL ) ) ; success = true ; } catch ( Exception e ) { ... | Load and register a new image . If the image resource does not exist or fails to load a default error resource is supplied . |
31,866 | public static < K , V > QuorumException create ( String simpleMsg , Map < K , V > successes , Map < K , Throwable > exceptions ) { Preconditions . checkArgument ( ! exceptions . isEmpty ( ) , "Must pass exceptions" ) ; StringBuilder msg = new StringBuilder ( ) ; msg . append ( simpleMsg ) . append ( ". " ) ; if ( ! suc... | Create a QuorumException instance with a descriptive message detailing the underlying exceptions as well as any successful responses which were returned . |
31,867 | public static < K > void store ( Configuration conf , K item , String keyName ) throws IOException { DefaultStringifier < K > stringifier = new DefaultStringifier < K > ( conf , GenericsUtil . getClass ( item ) ) ; conf . set ( keyName , stringifier . toString ( item ) ) ; stringifier . close ( ) ; } | Stores the item in the configuration with the given keyName . |
31,868 | public static < K > K load ( Configuration conf , String keyName , Class < K > itemClass ) throws IOException { DefaultStringifier < K > stringifier = new DefaultStringifier < K > ( conf , itemClass ) ; try { String itemStr = conf . get ( keyName ) ; return stringifier . fromString ( itemStr ) ; } finally { stringifier... | Restores the object from the configuration . |
31,869 | public static < K > void storeArray ( Configuration conf , K [ ] items , String keyName ) throws IOException { DefaultStringifier < K > stringifier = new DefaultStringifier < K > ( conf , GenericsUtil . getClass ( items [ 0 ] ) ) ; try { StringBuilder builder = new StringBuilder ( ) ; for ( K item : items ) { builder .... | Stores the array of items in the configuration with the given keyName . |
31,870 | public static < K > K [ ] loadArray ( Configuration conf , String keyName , Class < K > itemClass ) throws IOException { DefaultStringifier < K > stringifier = new DefaultStringifier < K > ( conf , itemClass ) ; try { String itemStr = conf . get ( keyName ) ; ArrayList < K > list = new ArrayList < K > ( ) ; String [ ] ... | Restores the array of objects from the configuration . |
31,871 | public short getBlockReplication ( BlockInfo block ) { if ( storage . isSourceBlock ( block ) ) { return getReplication ( ) ; } else { if ( storage . getStorageType ( ) == StorageType . RAID_STORAGE ) { return ( ( INodeRaidStorage ) storage ) . getCodec ( ) . parityReplication ; } else { throw new IllegalStateException... | may have different replication |
31,872 | protected void reportRegister ( DatanodeID node ) { if ( node != null && shouldUpdateNodes ( ) ) { if ( ! liveDatanodes . contains ( node ) ) { outStandingHeartbeats . add ( node ) ; liveDatanodes . add ( node ) ; } } } | Processes a register from the datanode . First we will await a heartbeat and later for a incremental block report . |
31,873 | protected boolean reportHeartBeat ( DatanodeID node ) { if ( node != null && shouldUpdateNodes ( ) ) { reportRegister ( node ) ; synchronized ( this ) { if ( outStandingHeartbeats . remove ( node ) ) { outStandingReports . add ( node ) ; return true ; } } } return false ; } | Processes a heartbeat from the datanode and determines whether we should send a ClearPrimary command to it . |
31,874 | protected void reportPrimaryCleared ( DatanodeID node ) { if ( node != null && shouldUpdateNodes ( ) ) { if ( outStandingReports . remove ( node ) ) { LOG . info ( "Failover: Outstanding reports: " + outStandingReports . size ( ) ) ; } } } | Report that the given datanode has cleared the primary . It is fully aware of the failover and it has sent the incremental block report . |
31,875 | protected void triggerFailover ( ) throws IOException { clearDataStructures ( ) ; prepareFailover = false ; for ( DatanodeInfo node : namesystem . datanodeReport ( DatanodeReportType . LIVE ) ) { liveDatanodes . add ( node ) ; outStandingHeartbeats . add ( node ) ; } InjectionHandler . processEvent ( InjectionEvent . S... | Triggers failover processing for safe mode and blocks until we have left safe mode . |
31,876 | private synchronized boolean datanodeReportsReceived ( boolean checkDatanodes ) { try { boolean received = this . getDatanodeReportRatio ( ) >= this . outStandingReportThreshold ; if ( ! received && checkDatanodes ) { checkDatanodes ( ) ; return this . getDatanodeReportRatio ( ) >= this . outStandingReportThreshold ; }... | Checks if the datanode reports have been received |
31,877 | public void setPrimary ( InstanceId ofPrimary ) throws IOException { if ( ofPrimary == null ) { primaryNode = null ; standbyNode = null ; return ; } switch ( ofPrimary ) { case NODEZERO : primaryNode = getNodeZero ( ) ; standbyNode = getNodeOne ( ) ; case NODEONE : primaryNode = getNodeOne ( ) ; standbyNode = getNodeZe... | The purpose of this method is to update in memory references to primary and standby . Please override this method and do the zookeeper specific logic in the method and call this method on successful zookeeper write . |
31,878 | public void setConfSource ( Configurable src ) { validateConfigFile ( src . getConf ( ) ) ; confSrc = src ; zkClient = new AvatarZooKeeperClient ( confSrc . getConf ( ) , null , true ) ; } | Sets the backing configuration source |
31,879 | void addCache ( Path hdfsPath , Path localPath , long size ) throws IOException { localMetrics . numAdd ++ ; CacheEntry c = new CacheEntry ( hdfsPath , localPath , size ) ; CacheEntry found = cacheMap . putIfAbsent ( hdfsPath , c ) ; if ( found != null ) { assert size == found . entrySize ; assert localPath . equals ( ... | Adds an entry into the cache . The size is the virtual size of this entry . |
31,880 | void renameCache ( Path oldhdfsPath , Path newhdfsPath , Path localPath ) throws IOException { CacheEntry found = cacheMap . remove ( oldhdfsPath ) ; if ( found == null ) { String msg = "LookasideCache error renaming path: " + oldhdfsPath + " to: " + newhdfsPath + " Path " + newhdfsPath + " because it does not exists i... | Change the localPath in the cache . The size remains the same . The accesstime is updated . |
31,881 | void removeCache ( Path hdfsPath ) { CacheEntry c = cacheMap . remove ( hdfsPath ) ; if ( c != null ) { cacheSize . addAndGet ( - c . entrySize ) ; localMetrics . numRemove ++ ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "LookasideCache removed path:" + hdfsPath + " freed up size: " + c . entrySize ) ; } } } | Delete an entry from the cache . |
31,882 | void evictCache ( Path hdfsPath ) throws IOException { CacheEntry c = cacheMap . remove ( hdfsPath ) ; if ( c != null ) { cacheSize . addAndGet ( - c . entrySize ) ; if ( evictionIface != null ) { evictionIface . evictCache ( c . hdfsPath , c . localPath , c . entrySize ) ; } localMetrics . numEvict ++ ; if ( LOG . isD... | Evicts an entry from the cache . This calls back into the application to indicate that a cache entry has been reclaimed . |
31,883 | Path getCache ( Path hdfsPath ) { CacheEntry c = cacheMap . get ( hdfsPath ) ; localMetrics . numGetAttempts ++ ; if ( c != null ) { c . setGenstamp ( globalStamp . incrementAndGet ( ) ) ; localMetrics . numGetHits ++ ; return c . localPath ; } return null ; } | Maps the hdfs pathname to a local pathname . Returns null if this is not found in the cache . |
31,884 | synchronized void checkEvict ( ) throws IOException { if ( cacheSize . get ( ) < cacheSizeMax ) { return ; } if ( evictionInProgress ) { return ; } evictionInProgress = true ; try { long curSize = cacheSize . get ( ) ; long targetSize = cacheSizeMax - ( cacheSizeMax * cacheEvictPercent ) / 100 ; if ( LOG . isDebugEnabl... | Eviction occurs if the cache is full we free up a specified percentage of the cache on every run . This method is synchronized so that only one thread is doing the eviction . |
31,885 | public synchronized static Random getSecureRandom ( ) { if ( cachedSecureRandom != null ) return cachedSecureRandom ; try { return SecureRandom . getInstance ( "SHA1PRNG" ) ; } catch ( NoSuchAlgorithmException e ) { return R ; } } | get an instance of a SecureRandom for creating storageid |
31,886 | protected void initGlobalSetting ( Configuration conf , AbstractList < File > dataDirs ) throws IOException { this . dataDirs = dataDirs ; this . conf = conf ; storage = new DataStorage ( this ) ; initConfig ( conf ) ; registerMXBean ( ) ; initDataXceiver ( conf ) ; startInfoServer ( conf ) ; initIpcServer ( conf ) ; m... | Initialize global settings for DN |
31,887 | protected void initDataSetAndScanner ( Configuration conf , AbstractList < File > dataDirs , int numOfNameSpaces ) throws IOException { initFsDataSet ( conf , dataDirs , numOfNameSpaces ) ; initDataBlockScanner ( conf ) ; initDirectoryScanner ( conf ) ; } | Initialize dataset and block scanner |
31,888 | void startDataNode ( Configuration conf , AbstractList < File > dataDirs ) throws IOException { initGlobalSetting ( conf , dataDirs ) ; List < InetSocketAddress > nameNodeAddrs = DFSUtil . getNNServiceRpcAddresses ( conf ) ; DataNode . nameNodeAddr = nameNodeAddrs . get ( 0 ) ; namespaceManager = new NamespaceManager (... | This method starts the data node with the specified conf . |
31,889 | public static InetSocketAddress getNameNodeAddress ( Configuration conf ) { InetSocketAddress addr = null ; addr = NameNode . getDNProtocolAddress ( conf ) ; if ( addr != null ) { return addr ; } return NameNode . getClientProtocolAddress ( conf ) ; } | This method returns the address namenode uses to communicate with datanodes . If this address is not configured the default NameNode address is used as it is running only one RPC server . If it is running multiple servers this address cannot be used by clients!! |
31,890 | public DatanodeProtocol getNSNamenode ( int namespaceId ) throws IOException { NamespaceService nsos = namespaceManager . get ( namespaceId ) ; if ( nsos == null || nsos . getDatanodeProtocol ( ) == null ) { throw new IOException ( "cannot find a namnode proxy for namespaceId=" + namespaceId ) ; } return nsos . getData... | Get namenode corresponding to a namespace |
31,891 | public DatanodeRegistration getDNRegistrationForNS ( int namespaceId ) throws IOException { NamespaceService nsos = namespaceManager . get ( namespaceId ) ; if ( nsos == null || nsos . getNsRegistration ( ) == null ) { throw new IOException ( "cannot find NSOfferService for namespaceId=" + namespaceId ) ; } return nsos... | get datanode registration by namespace id |
31,892 | public void shutdown ( ) { if ( this . shuttingDown . getAndSet ( true ) ) { LOG . warn ( "DataNode.shutdown() was called while shutting down." ) ; return ; } if ( infoServer != null ) { try { infoServer . stop ( ) ; } catch ( Exception e ) { LOG . warn ( "Exception shutting down DataNode" , e ) ; } } if ( ipcServer !=... | Shut down this instance of the datanode . Returns only after shutdown is complete . This method can only be called by the offerService thread . Otherwise deadlock might occur . |
31,893 | protected void checkDiskError ( Exception e ) throws IOException { if ( e instanceof ClosedByInterruptException || e instanceof java . io . InterruptedIOException ) { return ; } LOG . warn ( "checkDiskError: exception: " , e ) ; if ( e . getMessage ( ) != null && e . getMessage ( ) . startsWith ( "No space left on devi... | Check if there is no space in disk |
31,894 | protected void checkDiskError ( ) throws IOException { boolean setSuccess = checkingDisk . compareAndSet ( false , true ) ; if ( ! setSuccess ) { LOG . info ( "checkDiskError is already running." ) ; return ; } try { long curTime = System . currentTimeMillis ( ) ; if ( curTime - timeLastCheckDisk < minDiskCheckInterval... | Check if there is a disk failure and if so handle the error |
31,895 | public static String [ ] getListOfDataDirs ( Configuration conf ) { String [ ] configFilePath = conf . getStrings ( "dfs.datadir.confpath" ) ; String [ ] dataDirs = null ; if ( configFilePath != null && ( configFilePath . length != 0 ) ) { try { DataDirFileReader reader = new DataDirFileReader ( configFilePath [ 0 ] ) ... | Returns a list of data directories from the file provided by the dfs . datadir . confpath . If it cannot get the list of data directories then the method will return the default dataDirs from dfs . data . dir . |
31,896 | public static DataNode createDataNode ( String args [ ] , Configuration conf ) throws IOException { DataNode dn = instantiateDataNode ( args , conf ) ; if ( dn != null ) { dn . runDatanodeDaemon ( ) ; } return dn ; } | Instantiate & Start a single datanode daemon and wait for it to finish . If this thread is specifically interrupted it will stop waiting . |
31,897 | public String getNamenodeAddresses ( ) { final Map < String , Integer > info = new HashMap < String , Integer > ( ) ; for ( NamespaceService ns : namespaceManager . getAllNamenodeThreads ( ) ) { if ( ns != null && ns . initialized ( ) ) { info . put ( ns . getNNSocketAddress ( ) . getAddress ( ) . getHostAddress ( ) , ... | Returned information is a JSON representation of a map with name node host name as the key and block pool Id as the value |
31,898 | public String getVolumeInfo ( ) { final Map < String , Object > info = new HashMap < String , Object > ( ) ; try { FSVolume [ ] volumes = ( ( FSDataset ) this . data ) . volumes . getVolumes ( ) ; for ( FSVolume v : volumes ) { final Map < String , Object > innerInfo = new HashMap < String , Object > ( ) ; innerInfo . ... | Returned information is a JSON representation of a map with volume name as the key and value is a map of volume attribute keys to its values |
31,899 | public void sendBlocksBeingWrittenReport ( DatanodeProtocol node , int namespaceId , DatanodeRegistration nsRegistration ) throws IOException { Block [ ] blocks = data . getBlocksBeingWrittenReport ( namespaceId ) ; if ( blocks != null && blocks . length != 0 ) { long [ ] blocksAsLong = BlockListAsLongs . convertToArra... | Sends a Blocks Being Written report to the given node . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.