idx
int64
0
41.2k
question
stringlengths
73
5.81k
target
stringlengths
5
918
31,700
static String getStatusCssClass ( DeviceTestResult testResult ) { String status ; switch ( testResult . getStatus ( ) ) { case PASS : status = "pass" ; break ; case IGNORED : status = "ignored" ; break ; case FAIL : status = "fail" ; break ; case ASSUMPTION_FAILURE : status = "assumption-violation" ; break ; default : throw new IllegalArgumentException ( "Unknown result status: " + testResult . getStatus ( ) ) ; } return status ; }
Convert a test result status into an HTML CSS class .
31,701
static IDevice obtainRealDevice ( AndroidDebugBridge adb , String serial ) { for ( IDevice adbDevice : adb . getDevices ( ) ) { if ( adbDevice . getSerialNumber ( ) . equals ( serial ) ) { return adbDevice ; } } throw new IllegalArgumentException ( "Unknown device serial: " + serial ) ; }
Fetch or create a real device that corresponds to a device model .
31,702
public static Set < String > findAllDevices ( AndroidDebugBridge adb , Integer minApiLevel ) { Set < String > devices = new LinkedHashSet < > ( ) ; for ( IDevice realDevice : adb . getDevices ( ) ) { if ( minApiLevel == null ) { devices . add ( realDevice . getSerialNumber ( ) ) ; } else { DeviceDetails deviceDetails = DeviceDetails . createForDevice ( realDevice ) ; int apiLevel = deviceDetails . getApiLevel ( ) ; if ( apiLevel == DeviceDetails . UNKNOWN_API_LEVEL || apiLevel >= minApiLevel ) { devices . add ( realDevice . getSerialNumber ( ) ) ; } } } return devices ; }
Find all device serials that are plugged in through ADB .
31,703
public final int next ( ) throws IOException { if ( m_nextException != null ) { throw m_nextException ; } try { return doNext ( ) ; } catch ( IOException e ) { m_nextException = e ; resetState ( ) ; throw e ; } }
Advances to the next tag . Once method returns END_DOCUMENT it always returns END_DOCUMENT . Once method throws an exception it always throws the same exception .
31,704
public final int getAttributeResourceID ( int index ) { int resourceIndex = getAttribute ( index ) . name ; if ( m_resourceIDs == null || resourceIndex < 0 || resourceIndex >= m_resourceIDs . length ) { return 0 ; } return m_resourceIDs [ resourceIndex ] ; }
Returns attribute resource ID .
31,705
private RemoteAndroidTestRunner createConfiguredRunner ( String testPackage , String testRunner , IDevice device ) throws Exception { RemoteAndroidTestRunner runner = new SpoonAndroidTestRunner ( instrumentationInfo . getApplicationPackage ( ) , testPackage , testRunner , device , clearAppDataBeforeEachTest , debug ) ; runner . setMaxTimeToOutputResponse ( adbTimeout . toMillis ( ) , TimeUnit . MILLISECONDS ) ; for ( Map . Entry < String , String > entry : instrumentationArgs . entrySet ( ) ) { if ( isNullOrEmpty ( entry . getKey ( ) ) || isNullOrEmpty ( entry . getValue ( ) ) ) { logDebug ( debug , "Can't process instrumentationArg [%s] (empty key or value)" , entry . getKey ( ) + "=>" + entry . getValue ( ) ) ; continue ; } runner . addInstrumentationArg ( entry . getKey ( ) , entry . getValue ( ) ) ; } if ( numShards != 0 ) { addShardingInstrumentationArgs ( runner ) ; } if ( ! isNullOrEmpty ( className ) ) { if ( isNullOrEmpty ( methodName ) ) { runner . setClassName ( className ) ; } else { runner . setMethodName ( className , methodName ) ; } } if ( testSize != null ) { runner . setTestSize ( testSize ) ; } if ( codeCoverage ) { addCodeCoverageInstrumentationArgs ( runner , device ) ; } return runner ; }
Create a configured Test Runner . This method adds sharding class name method name test size and coverage if available .
31,706
private void pullDeviceFiles ( IDevice device ) throws Exception { for ( String dir : DEVICE_DIRS ) { pullDirectory ( device , dir ) ; } }
Download all files from a single device to the local machine .
31,707
private void cleanData ( List < GalenTestInfo > testInfos ) { for ( GalenTestInfo testInfo : testInfos ) { if ( testInfo . getReport ( ) != null ) { try { FileTempStorage storage = testInfo . getReport ( ) . getFileStorage ( ) ; if ( storage != null ) { storage . cleanup ( ) ; } } catch ( Exception e ) { LOG . error ( "Unkown error during report cleaning" , e ) ; } } } }
Removes temporary test data
31,708
public String evalStrictToString ( String script ) { Object returnedObject = context . evaluateString ( scope , script , "<cmd>" , 1 , null ) ; String unwrappedObject = unwrapProcessedObjectToString ( returnedObject ) ; if ( unwrappedObject != null ) { return unwrappedObject ; } else return "null" ; }
Used for processing js expressions in page spec reader . In case of failure throws an exception
31,709
private List < PageItemNode > restructurePageItems ( List < PageItem > items ) { List < PageItemNode > pins = items . stream ( ) . map ( PageItemNode :: new ) . collect ( toList ( ) ) ; for ( PageItemNode pinA : pins ) { for ( PageItemNode pinB : pins ) { if ( pinA != pinB ) { if ( isInside ( pinA . getPageItem ( ) . getArea ( ) , pinB . getPageItem ( ) . getArea ( ) ) ) { if ( pinB . getParent ( ) == pinA ) { throw new RuntimeException ( format ( "The following objects have identical areas: %s, %s. Please remove one of the objects" , pinA . getPageItem ( ) . getName ( ) , pinB . getPageItem ( ) . getName ( ) ) ) ; } pinA . moveToParent ( pinB ) ; break ; } } } } return pins . stream ( ) . filter ( pin -> pin . getParent ( ) == null && pin . getChildren ( ) . size ( ) > 0 ) . collect ( toList ( ) ) ; }
Orders page items into a tree by their area . Tries to fit one item inside another
31,710
public void setObjects ( Map < String , Locator > objects ) { this . objects . clear ( ) ; if ( objects != null ) { this . objects . putAll ( objects ) ; } }
Clears current objects list and sets new object list
31,711
public void setObjectGroups ( Map < String , List < String > > objectGroups ) { this . objectGroups . clear ( ) ; if ( objectGroups != null ) { this . objectGroups . putAll ( objectGroups ) ; } }
Clears the current object groups list and sets new group list
31,712
public void setSections ( List < PageSection > sections ) { this . sections . clear ( ) ; if ( sections != null ) { this . sections . addAll ( sections ) ; } }
Clears the current root sections and copies new sections from given list
31,713
public List < String > findOnlyExistingMatchingObjectNames ( String objectExpression ) { String [ ] parts = objectExpression . split ( "," ) ; List < String > allSortedObjectNames = getSortedObjectNames ( ) ; List < String > resultingObjectNames = new LinkedList < > ( ) ; for ( String part : parts ) { String singleExpression = part . trim ( ) ; if ( ! singleExpression . isEmpty ( ) ) { if ( GalenUtils . isObjectGroup ( singleExpression ) ) { resultingObjectNames . addAll ( findObjectsInGroup ( GalenUtils . extractGroupName ( singleExpression ) ) ) ; } else if ( GalenUtils . isObjectsSearchExpression ( singleExpression ) ) { Pattern objectPattern = GalenUtils . convertObjectNameRegex ( singleExpression ) ; for ( String objectName : allSortedObjectNames ) { if ( objectPattern . matcher ( objectName ) . matches ( ) ) { resultingObjectNames . add ( objectName ) ; } } } else if ( objects . containsKey ( singleExpression ) ) { resultingObjectNames . add ( singleExpression ) ; } } } return resultingObjectNames ; }
Find all objects that match galen object statements
31,714
public List < String > getSortedObjectNames ( ) { List < String > list = new ArrayList < > ( getObjects ( ) . keySet ( ) ) ; Collections . sort ( list , new AlphanumericComparator ( ) ) ; return list ; }
Returns an alphanumericly sorted list of names of all declared objects
31,715
public List < String > findObjectsInGroup ( String groupName ) { if ( getObjectGroups ( ) . containsKey ( groupName ) ) { return getObjectGroups ( ) . get ( groupName ) ; } else { return Collections . emptyList ( ) ; } }
Find all objects belonging to a specific group
31,716
public void merge ( PageSpec spec ) { if ( spec == null ) { throw new IllegalArgumentException ( "Cannot merge null spec" ) ; } objects . putAll ( spec . getObjects ( ) ) ; sections . addAll ( spec . getSections ( ) ) ; objectGroups . putAll ( spec . getObjectGroups ( ) ) ; }
Merges all objects sections and objectGroups from spec
31,717
public void addSpec ( String sectionName , String objectName , String specText ) { PageSection pageSection = findSection ( sectionName ) ; if ( pageSection == null ) { pageSection = new PageSection ( sectionName ) ; sections . add ( pageSection ) ; } ObjectSpecs objectSpecs = new ObjectSpecs ( objectName ) ; objectSpecs . addSpec ( new SpecReader ( ) . read ( specText ) ) ; pageSection . addObjects ( objectSpecs ) ; }
Parses the spec from specText and adds it to the page spec inside specified section . If section does not exit it will create it
31,718
public void cleanup ( ) { if ( this . childStorages != null ) { for ( FileTempStorage storage : this . childStorages ) { storage . cleanup ( ) ; } } for ( File file : this . files . values ( ) ) { FileUtils . deleteQuietly ( file ) ; } this . files . clear ( ) ; }
Removes all temporary files from disk . IMPORTANT! Use this call only in the end when you are sure you don t need report files anymore
31,719
public static BufferedImage resizeScreenshotIfNeeded ( WebDriver driver , BufferedImage screenshotImage ) { Double devicePixelRatio = 1.0 ; try { devicePixelRatio = ( ( Number ) ( ( JavascriptExecutor ) driver ) . executeScript ( JS_RETRIEVE_DEVICE_PIXEL_RATIO ) ) . doubleValue ( ) ; } catch ( Exception ex ) { ex . printStackTrace ( ) ; } if ( devicePixelRatio > 1.0 && screenshotImage . getWidth ( ) > 0 ) { Long screenSize = ( ( Number ) ( ( JavascriptExecutor ) driver ) . executeScript ( "return Math.max(" + "document.body.scrollWidth, document.documentElement.scrollWidth," + "document.body.offsetWidth, document.documentElement.offsetWidth," + "document.body.clientWidth, document.documentElement.clientWidth);" ) ) . longValue ( ) ; Double estimatedPixelRatio = ( ( double ) screenshotImage . getWidth ( ) ) / ( ( double ) screenSize ) ; if ( estimatedPixelRatio > 1.0 ) { int newWidth = ( int ) ( screenshotImage . getWidth ( ) / estimatedPixelRatio ) ; int newHeight = ( int ) ( screenshotImage . getHeight ( ) / estimatedPixelRatio ) ; Image tmp = screenshotImage . getScaledInstance ( newWidth , newHeight , Image . SCALE_SMOOTH ) ; BufferedImage scaledImage = new BufferedImage ( newWidth , newHeight , BufferedImage . TYPE_INT_RGB ) ; Graphics2D g2d = scaledImage . createGraphics ( ) ; g2d . drawImage ( tmp , 0 , 0 , null ) ; g2d . dispose ( ) ; return scaledImage ; } else return screenshotImage ; } else return screenshotImage ; }
Check the devicePixelRatio and adapts the size of the screenshot as if the ratio was 1 . 0
31,720
public int calculatePointOffsetDistance ( Point point ) { int right = left + width ; int bottom = top + height ; int pointLeft = point . getLeft ( ) ; int pointTop = point . getTop ( ) ; if ( contains ( point ) ) { return max ( top - pointTop , pointTop - bottom , left - pointLeft , pointLeft - right ) ; } else if ( isQuadrant1 ( point ) ) { return max ( abs ( left - pointLeft ) , abs ( top - pointTop ) ) ; } else if ( isQuadrant2 ( point ) ) { return abs ( top - pointTop ) ; } else if ( isQuadrant3 ( point ) ) { return max ( abs ( pointLeft - right ) , abs ( top - pointTop ) ) ; } else if ( isQuadrant4 ( point ) ) { return abs ( pointLeft - right ) ; } else if ( isQuadrant5 ( point ) ) { return max ( abs ( pointLeft - right ) , abs ( pointTop - bottom ) ) ; } else if ( isQuadrant6 ( point ) ) { return abs ( pointTop - bottom ) ; } else if ( isQuadrant7 ( point ) ) { return max ( abs ( left - pointLeft ) , abs ( pointTop - bottom ) ) ; } else { return abs ( left - pointLeft ) ; } }
Calculates the distance of given point to one of the rect edges . If the point is located inside the return result will be negative . If the point is located on edge the result will be zero If the point is located outside of the rect - it will return positive value
31,721
public static LayoutReport checkPageSpecLayout ( WebDriver driver , PageSpec pageSpec , String [ ] includedTags , String [ ] excludedTags , String screenshotFilePath ) throws IOException { TestSession session = TestSession . current ( ) ; if ( session == null ) { throw new UnregisteredTestSession ( "Cannot check layout as there was no TestSession created" ) ; } TestReport report = session . getReport ( ) ; File screenshotFile = null ; if ( screenshotFilePath != null ) { screenshotFile = new File ( screenshotFilePath ) ; if ( ! screenshotFile . exists ( ) || ! screenshotFile . isFile ( ) ) { throw new IOException ( "Couldn't find screenshot in " + screenshotFilePath ) ; } } if ( pageSpec == null ) { throw new IOException ( "Page spec is not defined" ) ; } List < String > includedTagsList = toList ( includedTags ) ; LayoutReport layoutReport = Galen . checkLayout ( new SeleniumBrowser ( driver ) , pageSpec , new SectionFilter ( includedTagsList , toList ( excludedTags ) ) , screenshotFile , session . getListener ( ) ) ; GalenUtils . attachLayoutReport ( layoutReport , report , "<unknown>" , includedTagsList ) ; return layoutReport ; }
Used in GalenApi . js
31,722
protected String getReport ( ) { StringBuffer sb = new StringBuffer ( ) ; Iterator iter = this . longCounters . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Entry e = ( Entry ) iter . next ( ) ; sb . append ( e . getKey ( ) . toString ( ) ) . append ( "\t" ) . append ( e . getValue ( ) ) . append ( "\n" ) ; } iter = this . doubleCounters . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Entry e = ( Entry ) iter . next ( ) ; sb . append ( e . getKey ( ) . toString ( ) ) . append ( "\t" ) . append ( e . getValue ( ) ) . append ( "\n" ) ; } return sb . toString ( ) ; }
log the counters
31,723
public static String stringifySolution ( int width , int height , List < List < ColumnName > > solution ) { String [ ] [ ] picture = new String [ height ] [ width ] ; StringBuffer result = new StringBuffer ( ) ; for ( List < ColumnName > row : solution ) { Piece piece = null ; for ( ColumnName item : row ) { if ( item instanceof Piece ) { piece = ( Piece ) item ; break ; } } for ( ColumnName item : row ) { if ( item instanceof Point ) { Point p = ( Point ) item ; picture [ p . y ] [ p . x ] = piece . getName ( ) ; } } } for ( int y = 0 ; y < picture . length ; ++ y ) { for ( int x = 0 ; x < picture [ y ] . length ; ++ x ) { result . append ( picture [ y ] [ x ] ) ; } result . append ( "\n" ) ; } return result . toString ( ) ; }
Convert a solution to the puzzle returned by the model into a string that represents the placement of the pieces onto the board .
31,724
public SolutionCategory getCategory ( List < List < ColumnName > > names ) { Piece xPiece = null ; for ( Piece p : pieces ) { if ( "x" . equals ( p . name ) ) { xPiece = p ; break ; } } for ( List < ColumnName > row : names ) { if ( row . contains ( xPiece ) ) { int low_x = width ; int high_x = 0 ; int low_y = height ; int high_y = 0 ; for ( ColumnName col : row ) { if ( col instanceof Point ) { int x = ( ( Point ) col ) . x ; int y = ( ( Point ) col ) . y ; if ( x < low_x ) { low_x = x ; } if ( x > high_x ) { high_x = x ; } if ( y < low_y ) { low_y = y ; } if ( y > high_y ) { high_y = y ; } } } boolean mid_x = ( low_x + high_x == width - 1 ) ; boolean mid_y = ( low_y + high_y == height - 1 ) ; if ( mid_x && mid_y ) { return SolutionCategory . CENTER ; } else if ( mid_x ) { return SolutionCategory . MID_X ; } else if ( mid_y ) { return SolutionCategory . MID_Y ; } break ; } } return SolutionCategory . UPPER_LEFT ; }
Find whether the solution has the x in the upper left quadrant the x - midline the y - midline or in the center .
31,725
protected void initializePieces ( ) { pieces . add ( new Piece ( "x" , " x /xxx/ x " , false , oneRotation ) ) ; pieces . add ( new Piece ( "v" , "x /x /xxx" , false , fourRotations ) ) ; pieces . add ( new Piece ( "t" , "xxx/ x / x " , false , fourRotations ) ) ; pieces . add ( new Piece ( "w" , " x/ xx/xx " , false , fourRotations ) ) ; pieces . add ( new Piece ( "u" , "x x/xxx" , false , fourRotations ) ) ; pieces . add ( new Piece ( "i" , "xxxxx" , false , twoRotations ) ) ; pieces . add ( new Piece ( "f" , " xx/xx / x " , true , fourRotations ) ) ; pieces . add ( new Piece ( "p" , "xx/xx/x " , true , fourRotations ) ) ; pieces . add ( new Piece ( "z" , "xx / x / xx" , true , twoRotations ) ) ; pieces . add ( new Piece ( "n" , "xx / xxx" , true , fourRotations ) ) ; pieces . add ( new Piece ( "y" , " x /xxxx" , true , fourRotations ) ) ; pieces . add ( new Piece ( "l" , " x/xxxx" , true , fourRotations ) ) ; }
Fill in the pieces list .
31,726
private static void generateRows ( DancingLinks dancer , Piece piece , int width , int height , boolean flip , boolean [ ] row , boolean upperLeft ) { int [ ] rotations = piece . getRotations ( ) ; for ( int rotIndex = 0 ; rotIndex < rotations . length ; ++ rotIndex ) { boolean [ ] [ ] shape = piece . getShape ( flip , rotations [ rotIndex ] ) ; for ( int x = 0 ; x < width ; ++ x ) { for ( int y = 0 ; y < height ; ++ y ) { if ( y + shape . length <= height && x + shape [ 0 ] . length <= width && ( ! upperLeft || ( isSide ( x , shape [ 0 ] . length , width ) && isSide ( y , shape . length , height ) ) ) ) { for ( int idx = 0 ; idx < width * height ; ++ idx ) { row [ idx ] = false ; } for ( int subY = 0 ; subY < shape . length ; ++ subY ) { for ( int subX = 0 ; subX < shape [ 0 ] . length ; ++ subX ) { row [ ( y + subY ) * width + x + subX ] = shape [ subY ] [ subX ] ; } } dancer . addRow ( row ) ; } } } } }
For a given piece generate all of the potential placements and add them as rows to the model .
31,727
public static void main ( String [ ] args ) { int width = 6 ; int height = 10 ; Pentomino model = new Pentomino ( width , height ) ; List splits = model . getSplits ( 2 ) ; for ( Iterator splitItr = splits . iterator ( ) ; splitItr . hasNext ( ) ; ) { int [ ] choices = ( int [ ] ) splitItr . next ( ) ; System . out . print ( "split:" ) ; for ( int i = 0 ; i < choices . length ; ++ i ) { System . out . print ( " " + choices [ i ] ) ; } System . out . println ( ) ; System . out . println ( model . solve ( choices ) + " solutions found." ) ; } }
Solve the 6x10 pentomino puzzle .
31,728
protected String constructQuery ( String table , String [ ] fieldNames ) { if ( fieldNames == null ) { throw new IllegalArgumentException ( "Field names may not be null" ) ; } StringBuilder query = new StringBuilder ( ) ; query . append ( "INSERT INTO " ) . append ( table ) ; if ( fieldNames . length > 0 && fieldNames [ 0 ] != null ) { query . append ( " (" ) ; for ( int i = 0 ; i < fieldNames . length ; i ++ ) { query . append ( fieldNames [ i ] ) ; if ( i != fieldNames . length - 1 ) { query . append ( "," ) ; } } query . append ( ")" ) ; } query . append ( " VALUES (" ) ; for ( int i = 0 ; i < fieldNames . length ; i ++ ) { query . append ( "?" ) ; if ( i != fieldNames . length - 1 ) { query . append ( "," ) ; } } query . append ( ");" ) ; return query . toString ( ) ; }
Constructs the query used as the prepared statement to insert data .
31,729
public static void setOutput ( JobConf job , String tableName , String ... fieldNames ) { job . setOutputFormat ( DBOutputFormat . class ) ; job . setReduceSpeculativeExecution ( false ) ; DBConfiguration dbConf = new DBConfiguration ( job ) ; dbConf . setOutputTableName ( tableName ) ; dbConf . setOutputFieldNames ( fieldNames ) ; }
Initializes the reduce - part of the job with the appropriate output settings
31,730
public List < String > getRacks ( ) { netlock . readLock ( ) . lock ( ) ; try { return new ArrayList < String > ( racks ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } }
Get a copy of the racks list .
31,731
public void add ( Node node ) { if ( node == null ) return ; if ( node instanceof InnerNode ) { throw new IllegalArgumentException ( "Not allow to add an inner node: " + NodeBase . getPath ( node ) ) ; } netlock . writeLock ( ) . lock ( ) ; try { Node rack = getNode ( node . getNetworkLocation ( ) ) ; if ( rack != null && ! ( rack instanceof InnerNode ) ) { throw new IllegalArgumentException ( "Unexpected data node " + node . toString ( ) + " at an illegal network location" ) ; } if ( clusterMap . add ( node ) ) { LOG . info ( "Adding a new node: " + NodeBase . getPath ( node ) ) ; if ( rack == null ) { String rackName = node . getNetworkLocation ( ) ; if ( ! racks . contains ( rackName ) ) { racks . add ( rackName ) ; } else { LOG . error ( "Discrepancy between network topology and list of racks. " + "New rack was already in the list of racks: " + rackName ) ; } } } LOG . debug ( "NetworkTopology became:\n" + this . toString ( ) ) ; } finally { netlock . writeLock ( ) . unlock ( ) ; } }
Add a leaf node Update node counter & rack counter if neccessary
31,732
public void remove ( Node node ) { if ( node == null ) return ; if ( node instanceof InnerNode ) { throw new IllegalArgumentException ( "Not allow to remove an inner node: " + NodeBase . getPath ( node ) ) ; } LOG . info ( "Removing a node: " + NodeBase . getPath ( node ) ) ; netlock . writeLock ( ) . lock ( ) ; try { String currentRackName = node . getNetworkLocation ( ) ; if ( clusterMap . remove ( node ) ) { InnerNode rack = ( InnerNode ) getNode ( node . getNetworkLocation ( ) ) ; if ( rack == null ) { if ( ! racks . remove ( currentRackName ) ) { LOG . error ( "Discrepancy between network topology and list of racks. " + "Removed rack " + currentRackName + " was not in the rack list." ) ; } } } LOG . debug ( "NetworkTopology became:\n" + this . toString ( ) ) ; } finally { netlock . writeLock ( ) . unlock ( ) ; } }
Remove a node Update node counter & rack counter if neccessary
31,733
public Node getNode ( String loc ) { netlock . readLock ( ) . lock ( ) ; try { loc = NodeBase . normalize ( loc ) ; if ( ! NodeBase . ROOT . equals ( loc ) ) loc = loc . substring ( 1 ) ; return clusterMap . getLoc ( loc ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } }
Given a string representation of a node return its reference
31,734
public List < Node > getDatanodesInRack ( String loc ) { netlock . readLock ( ) . lock ( ) ; try { loc = NodeBase . normalize ( loc ) ; if ( ! NodeBase . ROOT . equals ( loc ) ) loc = loc . substring ( 1 ) ; InnerNode rack = ( InnerNode ) clusterMap . getLoc ( loc ) ; if ( rack == null ) return null ; return new ArrayList < Node > ( rack . getChildren ( ) ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } }
Given a string representation of a rack return its children
31,735
public Set < String > getAllRacks ( ) { netlock . readLock ( ) . lock ( ) ; try { Set < String > result = new HashSet < String > ( this . racks ) ; result . addAll ( this . masterRacksSet ) ; return result ; } finally { netlock . readLock ( ) . unlock ( ) ; } }
Returns the set of racks
31,736
public int getDistance ( Node node1 , Node node2 ) { if ( node1 == node2 ) { return 0 ; } Node n1 = node1 , n2 = node2 ; int dis = 0 ; netlock . readLock ( ) . lock ( ) ; try { int level1 = node1 . getLevel ( ) , level2 = node2 . getLevel ( ) ; while ( n1 != null && level1 > level2 ) { n1 = n1 . getParent ( ) ; level1 -- ; dis ++ ; } while ( n2 != null && level2 > level1 ) { n2 = n2 . getParent ( ) ; level2 -- ; dis ++ ; } while ( n1 != null && n2 != null && n1 . getParent ( ) != n2 . getParent ( ) ) { n1 = n1 . getParent ( ) ; n2 = n2 . getParent ( ) ; dis += 2 ; } } finally { netlock . readLock ( ) . unlock ( ) ; } if ( n1 == null ) { LOG . warn ( "The cluster does not contain node: " + NodeBase . getPath ( node1 ) ) ; return Integer . MAX_VALUE ; } if ( n2 == null ) { LOG . warn ( "The cluster does not contain node: " + NodeBase . getPath ( node2 ) ) ; return Integer . MAX_VALUE ; } return dis + 2 ; }
Return the distance between two nodes It is assumed that the distance from one node to its parent is 1 The distance between two nodes is calculated by summing up their distances to their closest common ancestor .
31,737
public boolean isOnSameRack ( Node node1 , Node node2 ) { if ( node1 == null || node2 == null ) { return false ; } netlock . readLock ( ) . lock ( ) ; try { return node1 . getParent ( ) == node2 . getParent ( ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } }
Check if two nodes are on the same rack
31,738
public String chooseRack ( Set < String > excludedRacks ) { String chosenRack = null ; HashSet < Integer > chosenIndexes = new HashSet < Integer > ( ) ; netlock . readLock ( ) . lock ( ) ; try { int totalRacks = getNumOfRacks ( ) ; if ( totalRacks - excludedRacks . size ( ) <= 0 ) return null ; while ( true ) { int rackIndex ; do { rackIndex = r . nextInt ( totalRacks ) ; } while ( chosenIndexes . contains ( rackIndex ) ) ; chosenIndexes . add ( rackIndex ) ; chosenRack = racks . get ( rackIndex ) ; if ( excludedRacks == null || ( ! excludedRacks . contains ( chosenRack ) ) ) { return chosenRack ; } if ( chosenIndexes . size ( ) == totalRacks ) { return null ; } } } finally { netlock . readLock ( ) . unlock ( ) ; } }
Choose a rack which is not in exlcudedRacks
31,739
public void run ( Context context ) throws IOException , InterruptedException { setup ( context ) ; while ( context . nextKeyValue ( ) ) { map ( context . getCurrentKey ( ) , context . getCurrentValue ( ) , context ) ; } cleanup ( context ) ; }
Expert users can override this method for more complete control over the execution of the Mapper .
31,740
private static int getBlockIdInFile ( DistributedFileSystem srcFs , Path srcPath , long blockId ) throws IOException { FileStatus srcStat = srcFs . getFileStatus ( srcPath ) ; LocatedBlocks lbs = srcFs . getClient ( ) . getLocatedBlocks ( srcPath . toUri ( ) . getPath ( ) , 0 , srcStat . getLen ( ) ) ; int i = 0 ; LOG . info ( "Look for block " + blockId + " in file " + srcPath ) ; for ( LocatedBlock lb : lbs . getLocatedBlocks ( ) ) { if ( lb . getBlock ( ) . getBlockId ( ) == blockId ) { return i ; } i ++ ; } return - 1 ; }
Get the index of a block in a file according to the blockId .
31,741
public static LocationPair getBlockLocation ( Codec codec , FileSystem srcFs , Path srcFile , int blockIdxInFile , Configuration conf , List < FileStatus > lfs ) throws IOException { int stripeIdx = 0 ; int blockIdxInStripe = 0 ; int blockIdx = blockIdxInFile ; if ( codec . isDirRaid ) { Path parentPath = srcFile . getParent ( ) ; if ( lfs == null ) { lfs = RaidNode . listDirectoryRaidFileStatus ( conf , srcFs , parentPath ) ; } if ( lfs == null ) { throw new IOException ( "Couldn't list files under " + parentPath ) ; } int blockNum = 0 ; Path qSrcFile = srcFs . makeQualified ( srcFile ) ; for ( FileStatus fsStat : lfs ) { if ( ! fsStat . getPath ( ) . equals ( qSrcFile ) ) { blockNum += RaidNode . getNumBlocks ( fsStat ) ; } else { blockNum += blockIdxInFile ; break ; } } blockIdx = blockNum ; } stripeIdx = blockIdx / codec . stripeLength ; blockIdxInStripe = blockIdx % codec . stripeLength ; return new LocationPair ( stripeIdx , blockIdxInStripe , lfs ) ; }
Given a block in the file and specific codec return the LocationPair object which contains id of the stripe it belongs to and its location in the stripe
31,742
public void createPartControl ( Composite parent ) { Tree main = new Tree ( parent , SWT . SINGLE | SWT . FULL_SELECTION | SWT . H_SCROLL | SWT . V_SCROLL ) ; main . setHeaderVisible ( true ) ; main . setLinesVisible ( false ) ; main . setLayoutData ( new GridData ( GridData . FILL_BOTH ) ) ; TreeColumn serverCol = new TreeColumn ( main , SWT . SINGLE ) ; serverCol . setText ( "Location" ) ; serverCol . setWidth ( 300 ) ; serverCol . setResizable ( true ) ; TreeColumn locationCol = new TreeColumn ( main , SWT . SINGLE ) ; locationCol . setText ( "Master node" ) ; locationCol . setWidth ( 185 ) ; locationCol . setResizable ( true ) ; TreeColumn stateCol = new TreeColumn ( main , SWT . SINGLE ) ; stateCol . setText ( "State" ) ; stateCol . setWidth ( 95 ) ; stateCol . setResizable ( true ) ; TreeColumn statusCol = new TreeColumn ( main , SWT . SINGLE ) ; statusCol . setText ( "Status" ) ; statusCol . setWidth ( 300 ) ; statusCol . setResizable ( true ) ; viewer = new TreeViewer ( main ) ; viewer . setContentProvider ( this ) ; viewer . setLabelProvider ( this ) ; viewer . setInput ( CONTENT_ROOT ) ; getViewSite ( ) . setSelectionProvider ( viewer ) ; getViewSite ( ) . getActionBars ( ) . setGlobalActionHandler ( ActionFactory . DELETE . getId ( ) , deleteAction ) ; getViewSite ( ) . getActionBars ( ) . getToolBarManager ( ) . add ( editServerAction ) ; getViewSite ( ) . getActionBars ( ) . getToolBarManager ( ) . add ( newLocationAction ) ; createActions ( ) ; createContextMenu ( ) ; }
Creates the columns for the view
31,743
public void skip ( K key ) throws IOException { if ( hasNext ( ) ) { while ( cmp . compare ( khead , key ) <= 0 && next ( ) ) ; } }
Skip key - value pairs with keys less than or equal to the key provided .
31,744
@ SuppressWarnings ( "unchecked" ) public void accept ( CompositeRecordReader . JoinCollector i , K key ) throws IOException { vjoin . clear ( ) ; if ( 0 == cmp . compare ( key , khead ) ) { do { vjoin . add ( vhead ) ; } while ( next ( ) && 0 == cmp . compare ( key , khead ) ) ; } i . add ( id , vjoin ) ; }
JoinCollector comes from parent which has
31,745
public boolean next ( K key , U value ) throws IOException { if ( hasNext ( ) ) { WritableUtils . cloneInto ( key , khead ) ; WritableUtils . cloneInto ( value , vhead ) ; next ( ) ; return true ; } return false ; }
Write key - value pair at the head of this stream to the objects provided ; get next key - value pair from proxied RR .
31,746
public static BlockPlacementPolicy getInstance ( Configuration conf , FSClusterStats stats , NetworkTopology clusterMap , HostsFileReader hostsReader , DNSToSwitchMapping dnsToSwitchMapping , FSNamesystem namesystem ) { Class < ? extends BlockPlacementPolicy > replicatorClass = conf . getClass ( "dfs.block.replicator.classname" , BlockPlacementPolicyDefault . class , BlockPlacementPolicy . class ) ; BlockPlacementPolicy replicator = ( BlockPlacementPolicy ) ReflectionUtils . newInstance ( replicatorClass , conf ) ; replicator . initialize ( conf , stats , clusterMap , hostsReader , dnsToSwitchMapping , namesystem ) ; return replicator ; }
Get an instance of the configured Block Placement Policy based on the value of the configuration paramater dfs . block . replicator . classname .
31,747
public static void setFilterClass ( Configuration conf , Class filterClass ) { conf . set ( FILTER_CLASS , filterClass . getName ( ) ) ; }
set the filter class
31,748
protected ExecutorService createExecutor ( ) { return Executors . newSingleThreadExecutor ( new ThreadFactoryBuilder ( ) . setDaemon ( true ) . setNameFormat ( "Logger channel to " + addr ) . setUncaughtExceptionHandler ( UncaughtExceptionHandlers . systemExit ( ) ) . build ( ) ) ; }
Separated out for easy overriding in tests .
31,749
private void heartbeatIfNecessary ( ) throws IOException { if ( lastHeartbeatStopwatch . elapsedMillis ( ) > HEARTBEAT_INTERVAL_MILLIS || ! lastHeartbeatStopwatch . isRunning ( ) ) { try { getProxy ( ) . heartbeat ( createReqInfo ( ) ) ; } finally { lastHeartbeatStopwatch . reset ( ) . start ( ) ; } } }
When we ve entered an out - of - sync state it s still useful to periodically send an empty RPC to the server such that it has the up to date committedTxId . This acts as a sanity check during recovery and also allows that node s metrics to be up - to - date about its lag .
31,750
public URL buildURLToFetchImage ( long txid ) { Preconditions . checkArgument ( txid >= - 1 , "Invalid segment: %s" , txid ) ; Preconditions . checkState ( httpPort != - 1 , "HTTP port not set yet" ) ; try { String path = GetJournalImageServlet . buildPath ( journalId , txid , nsInfo , true ) ; return new URL ( "http" , addr . getAddress ( ) . getHostAddress ( ) , httpPort , path . toString ( ) ) ; } catch ( MalformedURLException e ) { throw new IllegalStateException ( e ) ; } }
Build url to fetch image from the journal node to which this logger channel is attached .
31,751
public boolean moveAndDeleteRelativePath ( String volume , String pathName ) throws IOException { volume = normalizePath ( volume ) ; String newPathName = format . format ( new Date ( ) ) + "_" + uniqueId . getAndIncrement ( ) ; newPathName = TOBEDELETED + Path . SEPARATOR_CHAR + newPathName ; Path source = new Path ( volume , pathName ) ; Path target = new Path ( volume , newPathName ) ; try { if ( ! localFileSystem . rename ( source , target ) ) { if ( ! localFileSystem . exists ( source ) ) { return false ; } if ( ! localFileSystem . mkdirs ( new Path ( volume , TOBEDELETED ) ) ) { throw new IOException ( "Cannot create " + TOBEDELETED + " under " + volume ) ; } if ( ! localFileSystem . rename ( source , target ) ) { throw new IOException ( "Cannot rename " + source + " to " + target ) ; } } } catch ( FileNotFoundException e ) { return false ; } DeleteTask task = new DeleteTask ( volume , pathName , newPathName ) ; execute ( volume , task ) ; return true ; }
Move the path name on one volume to a temporary location and then delete them .
31,752
public boolean moveAndDeleteFromEachVolume ( String pathName ) throws IOException { boolean result = true ; for ( int i = 0 ; i < volumes . length ; i ++ ) { result = result && moveAndDeleteRelativePath ( volumes [ i ] , pathName ) ; } return result ; }
Move the path name on each volume to a temporary location and then delete them .
31,753
private static String getRelativePathName ( String absolutePathName , String volume ) { absolutePathName = normalizePath ( absolutePathName ) ; if ( ! absolutePathName . startsWith ( volume ) ) { return null ; } String fileName = absolutePathName . substring ( volume . length ( ) ) ; if ( fileName . charAt ( 0 ) == Path . SEPARATOR_CHAR ) { fileName = fileName . substring ( 1 ) ; } return fileName ; }
Get the relative path name with respect to the root of the volume .
31,754
public boolean moveAndDeleteAbsolutePath ( String absolutePathName ) throws IOException { for ( int v = 0 ; v < volumes . length ; v ++ ) { String relative = getRelativePathName ( absolutePathName , volumes [ v ] ) ; if ( relative != null ) { return moveAndDeleteRelativePath ( volumes [ v ] , relative ) ; } } throw new IOException ( "Cannot delete " + absolutePathName + " because it's outside of all volumes." ) ; }
Move the path name to a temporary location and then delete it .
31,755
public static String buildPath ( String journalId , long txid , NamespaceInfo nsInfo , boolean throttle ) { StringBuilder path = new StringBuilder ( "/getImage?getimage=1&" ) ; try { path . append ( JOURNAL_ID_PARAM ) . append ( "=" ) . append ( URLEncoder . encode ( journalId , "UTF-8" ) ) ; path . append ( "&" + TXID_PARAM ) . append ( "=" ) . append ( txid ) ; path . append ( "&" + THROTTLE_PARAM ) . append ( "=" ) . append ( throttle ) ; path . append ( "&" + STORAGEINFO_PARAM ) . append ( "=" ) . append ( URLEncoder . encode ( nsInfo . toColonSeparatedString ( ) , "UTF-8" ) ) ; } catch ( UnsupportedEncodingException e ) { throw new RuntimeException ( e ) ; } return path . toString ( ) ; }
Build path to fetch image at given txid for the given journal . This path does not contain address .
31,756
public void set ( Writable obj ) { instance = obj ; Class < ? extends Writable > instanceClazz = instance . getClass ( ) ; Class < ? extends Writable > [ ] clazzes = getTypes ( ) ; for ( int i = 0 ; i < clazzes . length ; i ++ ) { Class < ? extends Writable > clazz = clazzes [ i ] ; if ( clazz . equals ( instanceClazz ) ) { type = ( byte ) i ; return ; } } throw new RuntimeException ( "The type of instance is: " + instance . getClass ( ) + ", which is NOT registered." ) ; }
Set the instance that is wrapped .
31,757
public RemoteEditLogManifest getEditLogManifest ( long firstTxId ) throws IOException { File currentDir = sd . getCurrentDir ( ) ; List < EditLogFile > allLogFiles = matchEditLogs ( FileUtil . listFiles ( currentDir ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( allLogFiles ) ; } List < RemoteEditLog > ret = new ArrayList < RemoteEditLog > ( allLogFiles . size ( ) ) ; for ( EditLogFile elf : allLogFiles ) { if ( elf . isCorrupt ( ) ) continue ; if ( elf . getFirstTxId ( ) >= firstTxId ) { ret . add ( new RemoteEditLog ( elf . firstTxId , elf . lastTxId , elf . isInProgress ) ) ; } else if ( ( firstTxId > elf . getFirstTxId ( ) ) && ( firstTxId <= elf . getLastTxId ( ) ) ) { throw new IOException ( "Asked for firstTxId " + firstTxId + " which is in the middle of file " + elf . file ) ; } } Collections . sort ( ret ) ; return new RemoteEditLogManifest ( ret ) ; }
Find all editlog segments starting at or above the given txid . Include inprogress segments . Notice that the segments do not have to be contiguous . JournalSet handles the holes between segments .
31,758
public List < EditLogFile > getLogFiles ( long fromTxId , boolean enforceBoundary ) throws IOException { File currentDir = sd . getCurrentDir ( ) ; List < EditLogFile > allLogFiles = matchEditLogs ( currentDir . listFiles ( ) ) ; List < EditLogFile > logFiles = new ArrayList < EditLogFile > ( ) ; for ( EditLogFile elf : allLogFiles ) { if ( enforceBoundary && fromTxId > elf . getFirstTxId ( ) && fromTxId <= elf . getLastTxId ( ) ) { throw new IOException ( "Asked for fromTxId " + fromTxId + " which is in middle of file " + elf . file ) ; } if ( fromTxId <= elf . getFirstTxId ( ) ) { logFiles . add ( elf ) ; } } Collections . sort ( logFiles , EditLogFile . COMPARE_BY_START_TXID ) ; return logFiles ; }
Get all edit log segments
31,759
private long findMaxTransaction ( ) throws IOException { for ( EditLogFile elf : getLogFiles ( 0 ) ) { if ( elf . isInProgress ( ) ) { maxSeenTransaction = Math . max ( elf . getFirstTxId ( ) , maxSeenTransaction ) ; } maxSeenTransaction = Math . max ( elf . getLastTxId ( ) , maxSeenTransaction ) ; } return maxSeenTransaction ; }
Find the maximum transaction in the journal . This gets stored in a member variable as corrupt edit logs will be moved aside but we still need to remember their first tranaction id in the case that it was the maximum transaction in the journal .
31,760
void init ( JobID jobId ) { this . startTime = JobTracker . getClock ( ) . getTime ( ) ; this . id = new TaskID ( jobId , isMapTask ( ) , partition ) ; this . skipping = startSkipping ( ) ; long speculativeDuration ; if ( isMapTask ( ) ) { this . speculativeLag = conf . getMapSpeculativeLag ( ) ; speculativeDuration = conf . getMapSpeculativeDuration ( ) ; } else { this . speculativeLag = conf . getReduceSpeculativeLag ( ) ; speculativeDuration = conf . getReduceSpeculativeDuration ( ) ; } if ( speculativeDuration > 0 ) { this . maxProgressRateForSpeculation = 1.0 / ( 1000.0 * speculativeDuration ) ; } else { this . maxProgressRateForSpeculation = - 1.0 ; } this . useProcessingRateForSpeculation = conf . getBoolean ( "mapreduce.job.speculative.using.processing.rate" , false ) ; }
Initialization common to Map and Reduce
31,761
public long getDispatchTime ( TaskAttemptID taskid ) { Long l = dispatchTimeMap . get ( taskid ) ; if ( l != null ) { return l . longValue ( ) ; } return 0 ; }
Return the dispatch time
31,762
public void setDispatchTime ( TaskAttemptID taskid , long disTime ) { dispatchTimeMap . put ( taskid , disTime ) ; this . lastDispatchTime = disTime ; }
Set the dispatch time
31,763
public boolean shouldClose ( TaskAttemptID taskid ) { if ( tasksReportedClosed . contains ( taskid ) ) { if ( tasksToKill . keySet ( ) . contains ( taskid ) ) return true ; else return false ; } boolean close = false ; TaskStatus ts = taskStatuses . get ( taskid ) ; if ( ( ts != null ) && ( ( this . failed ) || ( ( job . getStatus ( ) . getRunState ( ) != JobStatus . RUNNING && ( job . getStatus ( ) . getRunState ( ) != JobStatus . PREP ) ) ) ) ) { tasksReportedClosed . add ( taskid ) ; close = true ; } else if ( ( completes > 0 ) && ! ( isMapTask ( ) && ! jobSetup && ! jobCleanup && isComplete ( taskid ) ) ) { tasksReportedClosed . add ( taskid ) ; close = true ; } else if ( isCommitPending ( taskid ) && ! shouldCommit ( taskid ) ) { tasksReportedClosed . add ( taskid ) ; close = true ; } else { close = tasksToKill . keySet ( ) . contains ( taskid ) ; } return close ; }
Returns whether a component task - thread should be closed because the containing JobInProgress has completed or the task is killed by the user
31,764
synchronized TaskReport generateSingleReport ( ) { ArrayList < String > diagnostics = new ArrayList < String > ( ) ; for ( List < String > l : taskDiagnosticData . values ( ) ) { diagnostics . addAll ( l ) ; } TIPStatus currentStatus = null ; if ( isRunning ( ) && ! isComplete ( ) ) { currentStatus = TIPStatus . RUNNING ; } else if ( isComplete ( ) ) { currentStatus = TIPStatus . COMPLETE ; } else if ( wasKilled ( ) ) { currentStatus = TIPStatus . KILLED ; } else if ( isFailed ( ) ) { currentStatus = TIPStatus . FAILED ; } else if ( ! ( isComplete ( ) || isRunning ( ) || wasKilled ( ) ) ) { currentStatus = TIPStatus . PENDING ; } TaskReport report = new TaskReport ( getTIPId ( ) , ( float ) progress , state , diagnostics . toArray ( new String [ diagnostics . size ( ) ] ) , currentStatus , execStartTime , execFinishTime , counters ) ; if ( currentStatus == TIPStatus . RUNNING ) { report . setRunningTaskAttempts ( activeTasks . keySet ( ) ) ; } else if ( currentStatus == TIPStatus . COMPLETE ) { report . setSuccessfulAttempt ( getSuccessfulTaskid ( ) ) ; } return report ; }
Creates a status report for this task . Includes the task ID and overall status plus reports for all the component task - threads that have ever been started .
31,765
public void addDiagnosticInfo ( TaskAttemptID taskId , String diagInfo ) { List < String > diagHistory = taskDiagnosticData . get ( taskId ) ; if ( diagHistory == null ) { diagHistory = new ArrayList < String > ( ) ; taskDiagnosticData . put ( taskId , diagHistory ) ; } diagHistory . add ( diagInfo ) ; }
Save diagnostic information for a given task .
31,766
public void incompleteSubTask ( TaskAttemptID taskid , JobStatus jobStatus ) { TaskStatus status = taskStatuses . get ( taskid ) ; String trackerName ; String trackerHostName = null ; TaskStatus . State taskState = TaskStatus . State . FAILED ; if ( status != null ) { trackerName = status . getTaskTracker ( ) ; trackerHostName = JobInProgressTraits . convertTrackerNameToHostName ( trackerName ) ; Boolean shouldFail = tasksToKill . remove ( taskid ) ; if ( shouldFail != null ) { if ( status . getRunState ( ) == TaskStatus . State . FAILED || status . getRunState ( ) == TaskStatus . State . KILLED ) { taskState = ( shouldFail ) ? TaskStatus . State . FAILED : TaskStatus . State . KILLED ; } else { taskState = ( shouldFail ) ? TaskStatus . State . FAILED_UNCLEAN : TaskStatus . State . KILLED_UNCLEAN ; } status . setRunState ( taskState ) ; addDiagnosticInfo ( taskid , "Task has been " + taskState + " by the user" ) ; } taskState = status . getRunState ( ) ; if ( taskState != TaskStatus . State . FAILED && taskState != TaskStatus . State . KILLED && taskState != TaskStatus . State . FAILED_UNCLEAN && taskState != TaskStatus . State . KILLED_UNCLEAN ) { LOG . info ( "Task '" + taskid + "' running on '" + trackerName + "' in state: '" + taskState + "' being failed!" ) ; status . setRunState ( TaskStatus . State . FAILED ) ; taskState = TaskStatus . State . FAILED ; } if ( 0 == status . getFinishTime ( ) ) { status . setFinishTime ( JobTracker . getClock ( ) . getTime ( ) ) ; } } this . activeTasks . remove ( taskid ) ; if ( this . isMapTask ( ) && ! jobSetup && ! jobCleanup && isComplete ( taskid ) && jobStatus . getRunState ( ) != JobStatus . SUCCEEDED ) { this . completes -- ; resetSuccessfulTaskid ( ) ; } if ( tasks . contains ( taskid ) ) { if ( taskState == TaskStatus . State . FAILED ) { numTaskFailures ++ ; machinesWhereFailed . add ( trackerHostName ) ; if ( maxSkipRecords > 0 ) { LOG . debug ( "TaskInProgress adding" + status . getNextRecordRange ( ) ) ; failedRanges . add ( status . getNextRecordRange ( ) ) ; skipping = startSkipping ( ) ; } } else if ( taskState == TaskStatus . State . KILLED ) { numKilledTasks ++ ; } } if ( numTaskFailures >= maxTaskAttempts ) { LOG . info ( "TaskInProgress " + getTIPId ( ) + " has failed " + numTaskFailures + " times." ) ; kill ( ) ; } }
Indicate that one of the taskids in this TaskInProgress has failed .
31,767
public void completed ( TaskAttemptID taskid ) { completedTask ( taskid , TaskStatus . State . SUCCEEDED ) ; setSuccessfulTaskid ( taskid ) ; this . completes ++ ; this . execFinishTime = JobTracker . getClock ( ) . getTime ( ) ; recomputeProgress ( ) ; }
Indicate that one of the taskids in this TaskInProgress has successfully completed!
31,768
boolean killTask ( TaskAttemptID taskId , boolean shouldFail , String diagnosticInfo ) { TaskStatus st = taskStatuses . get ( taskId ) ; if ( st != null && ( st . getRunState ( ) == TaskStatus . State . RUNNING || st . getRunState ( ) == TaskStatus . State . COMMIT_PENDING || st . inTaskCleanupPhase ( ) || st . getRunState ( ) == TaskStatus . State . UNASSIGNED ) && tasksToKill . put ( taskId , shouldFail ) == null ) { addDiagnosticInfo ( taskId , diagnosticInfo ) ; LOG . info ( diagnosticInfo ) ; return true ; } return false ; }
Kill the given task
31,769
boolean canBeSpeculated ( long currentTime ) { if ( skipping || ! isRunnable ( ) || ! isRunning ( ) || completes != 0 || isOnlyCommitPending ( ) || activeTasks . size ( ) > MAX_TASK_EXECS ) { if ( isMapTask ( ) ? job . shouldLogCannotspeculativeMaps ( ) : job . shouldLogCannotspeculativeReduces ( ) ) { LOG . info ( "Task " + getTIPId ( ) + " cannot be speculated because of " + "skipping = " + skipping + " isRunnable() = " + isRunnable ( ) + " isRunning() = " + isRunning ( ) + " completes = " + completes + " isOnlyCommitPending() = " + isOnlyCommitPending ( ) + " activetask-size = " + activeTasks . size ( ) + " MAX_TASK_EXECS = " + MAX_TASK_EXECS ) ; } return false ; } if ( isSpeculativeForced ( ) ) { return true ; } if ( currentTime - lastDispatchTime < speculativeLag ) { if ( isMapTask ( ) ? job . shouldLogCannotspeculativeMaps ( ) : job . shouldLogCannotspeculativeReduces ( ) ) { LOG . info ( "Task " + getTIPId ( ) + " cannot be speculated because of " + "no speculation for first few seconds" ) ; } return false ; } if ( ( maxProgressRateForSpeculation > 0 ) && ( progressRate > maxProgressRateForSpeculation ) ) { if ( isMapTask ( ) ? job . shouldLogCannotspeculativeMaps ( ) : job . shouldLogCannotspeculativeReduces ( ) ) { LOG . info ( "Task " + getTIPId ( ) + " cannot be speculated because " + "the task progress rate is fast enough to complete." + " maxProgressRateForSpeculation = " + maxProgressRateForSpeculation + " and progressRate = " + progressRate ) ; } return false ; } if ( isMapTask ( ) ? job . shouldSpeculateAllRemainingMaps ( ) : job . shouldSpeculateAllRemainingReduces ( ) ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Speculate " + getTIPId ( ) + " because the job is almost finished" ) ; } return true ; } if ( useProcessingRateForSpeculation ) { return canBeSpeculatedUsingProcessingRate ( currentTime ) ; } else { return canBeSpeculatedUsingProgressRate ( currentTime ) ; } }
Can this task be speculated? This requires that it isn t done or almost done and that it isn t already being speculatively executed .
31,770
public Task getTaskToRun ( String taskTracker ) { TaskAttemptID taskid = null ; if ( nextTaskId < ( MAX_TASK_EXECS + maxTaskAttempts + numKilledTasks ) ) { int attemptId = job . getNumRestarts ( ) * NUM_ATTEMPTS_PER_RESTART + nextTaskId ; taskid = new TaskAttemptID ( id , attemptId ) ; ++ nextTaskId ; } else { LOG . warn ( "Exceeded limit of " + ( MAX_TASK_EXECS + maxTaskAttempts ) + " (plus " + numKilledTasks + " killed)" + " attempts for the tip '" + getTIPId ( ) + "'" ) ; return null ; } setDispatchTime ( taskid , JobTracker . getClock ( ) . getTime ( ) ) ; if ( 0 == execStartTime ) { execStartTime = JobTracker . getClock ( ) . getTime ( ) ; } return addRunningTask ( taskid , taskTracker ) ; }
Return a Task that can be sent to a TaskTracker for execution .
31,771
public Task addRunningTask ( TaskAttemptID taskid , String taskTracker , boolean taskCleanup ) { int numSlotsNeeded = taskCleanup ? 1 : numSlotsRequired ; Task t = null ; if ( isMapTask ( ) ) { LOG . debug ( "attempt " + numTaskFailures + " sending skippedRecords " + failedRanges . getIndicesCount ( ) ) ; String splitClass = null ; BytesWritable split ; if ( ! jobSetup && ! jobCleanup ) { splitClass = rawSplit . getClassName ( ) ; split = rawSplit . getBytes ( ) ; } else { split = new BytesWritable ( ) ; } t = new MapTask ( jobFile , taskid , partition , splitClass , split , numSlotsNeeded , job . getUser ( ) ) ; } else { t = new ReduceTask ( jobFile , taskid , partition , numMaps , numSlotsNeeded , job . getUser ( ) ) ; } if ( jobCleanup ) { t . setJobCleanupTask ( ) ; } if ( jobSetup ) { t . setJobSetupTask ( ) ; } if ( taskCleanup ) { t . setTaskCleanupTask ( ) ; t . setState ( taskStatuses . get ( taskid ) . getRunState ( ) ) ; cleanupTasks . put ( taskid , taskTracker ) ; } t . setConf ( conf ) ; LOG . debug ( "Launching task with skipRanges:" + failedRanges . getSkipRanges ( ) ) ; t . setSkipRanges ( failedRanges . getSkipRanges ( ) ) ; t . setSkipping ( skipping ) ; if ( failedRanges . isTestAttempt ( ) ) { t . setWriteSkipRecs ( false ) ; } if ( activeTasks . size ( ) >= 1 ) { speculativeTaskId = taskid ; } else { speculativeTaskId = null ; } activeTasks . put ( taskid , taskTracker ) ; tasks . add ( taskid ) ; if ( firstTaskId == null ) { firstTaskId = taskid ; } return t ; }
Adds a previously running task to this tip . This is used in case of jobtracker restarts .
31,772
public boolean hasRunOnMachine ( String trackerHost , String trackerName ) { return this . activeTasks . values ( ) . contains ( trackerName ) || hasFailedOnMachine ( trackerHost ) ; }
Was this task ever scheduled to run on this machine?
31,773
public String getSplitNodes ( ) { if ( ! isMapTask ( ) || jobSetup || jobCleanup ) { return "" ; } String [ ] nodes = rawSplit . getLocations ( ) ; if ( nodes == null || nodes . length == 0 ) { return "" ; } StringBuffer ret = new StringBuffer ( nodes [ 0 ] ) ; for ( int i = 1 ; i < nodes . length ; i ++ ) { ret . append ( "," ) ; ret . append ( nodes [ i ] ) ; } return ret . toString ( ) ; }
Gets the Node list of input split locations sorted in rack order .
31,774
public void updateProgressRate ( long currentTime ) { double bestProgressRate = 0 ; for ( TaskStatus ts : taskStatuses . values ( ) ) { if ( ts . getRunState ( ) == TaskStatus . State . RUNNING || ts . getRunState ( ) == TaskStatus . State . SUCCEEDED || ts . getRunState ( ) == TaskStatus . State . COMMIT_PENDING ) { double tsProgressRate = ts . getProgress ( ) / Math . max ( 1 , currentTime - getDispatchTime ( ts . getTaskID ( ) ) ) ; if ( tsProgressRate > bestProgressRate ) { bestProgressRate = tsProgressRate ; } } } DataStatistics taskStats = job . getRunningTaskStatistics ( isMapTask ( ) ) ; taskStats . updateStatistics ( progressRate , bestProgressRate ) ; progressRate = bestProgressRate ; }
update progress rate for a task
31,775
private void updateJobStats ( Phase phase , ProcessingRates oldRates , ProcessingRates newRates ) { DataStatistics stats = job . getRunningTaskStatistics ( phase ) ; stats . updateStatistics ( oldRates . getRate ( phase ) , newRates . getRate ( phase ) ) ; }
Helper function that updates the processing rates stats for this job . Only updates the rate in the corresponding phase .
31,776
private int getAvailableSlots ( TaskTrackerStatus tts , TaskType type ) { return getMaxSlots ( tts , type ) - occupiedSlotsAfterHeartbeat ( tts , type ) ; }
Obtain the how many more slots can be scheduled on this tasktracker
31,777
private int occupiedSlotsAfterHeartbeat ( TaskTrackerStatus tts , TaskType type ) { int occupied = ( type == TaskType . MAP ) ? tts . countOccupiedMapSlots ( ) - tts . getMapsReleased ( ) : tts . countOccupiedReduceSlots ( ) - tts . getReducesReleased ( ) ; return occupied ; }
Obtain the number of occupied slots after the scheduled kills are done
31,778
private void updateLocalityWaitTimes ( long currentTime ) { long timeSinceLastHeartbeat = ( lastHeartbeatTime == 0 ? 0 : currentTime - lastHeartbeatTime ) ; lastHeartbeatTime = currentTime ; for ( JobInfo info : infos . values ( ) ) { if ( info . skippedAtLastHeartbeat ) { info . timeWaitedForLocalMap += timeSinceLastHeartbeat ; info . skippedAtLastHeartbeat = false ; } } }
Update locality wait times for jobs that were skipped at last heartbeat .
31,779
private void updateLastMapLocalityLevel ( JobInProgress job , Task mapTaskLaunched , TaskTrackerStatus tracker ) { JobInfo info = infos . get ( job ) ; LocalityLevel localityLevel = localManager . taskToLocalityLevel ( job , mapTaskLaunched , tracker ) ; info . lastMapLocalityLevel = localityLevel ; info . timeWaitedForLocalMap = 0 ; }
Update a job s locality level and locality wait variables given that that it has just launched a map task on a given task tracker .
31,780
protected void update ( ) { ClusterStatus clusterStatus = taskTrackerManager . getClusterStatus ( ) ; if ( autoComputeLocalityDelay ) { JobTracker jobTracker = ( JobTracker ) taskTrackerManager ; localityDelayNodeLocal = Math . min ( MAX_AUTOCOMPUTED_LOCALITY_DELAY , ( long ) ( 1.5 * jobTracker . getNextHeartbeatInterval ( ) ) ) ; localityDelayRackLocal = localityDelayNodeLocal ; } synchronized ( this ) { if ( poolMgr . reloadAllocsIfNecessary ( ) ) { poolMgr . checkMinimumSlotsAvailable ( clusterStatus , TaskType . MAP ) ; poolMgr . checkMinimumSlotsAvailable ( clusterStatus , TaskType . REDUCE ) ; } List < JobInProgress > toRemove = new ArrayList < JobInProgress > ( ) ; for ( JobInProgress job : infos . keySet ( ) ) { int runState = job . getStatus ( ) . getRunState ( ) ; if ( runState == JobStatus . SUCCEEDED || runState == JobStatus . FAILED || runState == JobStatus . KILLED ) { toRemove . add ( job ) ; } } for ( JobInProgress job : toRemove ) { infos . remove ( job ) ; poolMgr . removeJob ( job ) ; } long now = clock . getTime ( ) ; long timeDelta = now - lastUpdateTime ; updateDeficits ( timeDelta ) ; updateRunnability ( ) ; updateTaskCounts ( ) ; updateWeights ( ) ; updateMinAndMaxSlots ( ) ; updateFairShares ( clusterStatus ) ; if ( preemptionEnabled ) { updatePreemptionVariables ( ) ; } sortJobs ( ) ; updatePoolMetrics ( ) ; dumpStatus ( now ) ; lastUpdateTime = now ; } }
Recompute the internal variables used by the scheduler - per - job weights fair shares deficits minimum slot allocations and numbers of running and needed tasks of each type .
31,781
private BlockedAdmissionReason adjustClusterwideReason ( AdmissionControlData admissionControlData , BlockedAdmissionReason originalReason , String poolName ) { BlockedAdmissionReason clusterwideReason = ( BlockedAdmissionReason . underClusterwideAdmissionControl ( admissionControlData . getSoftTaskLimit ( ) , admissionControlData . getHardTaskLimit ( ) , admissionControlData . getTotalTasks ( ) , ! poolMgr . isSystemPool ( poolName ) ) ) ; return ( clusterwideReason == BlockedAdmissionReason . NONE ) ? originalReason : clusterwideReason ; }
Based on the original reason and admission control data adjust the reason this job is not admitted if any .
31,782
synchronized Collection < NotAdmittedJobInfo > getNotAdmittedJobs ( ) { List < NotAdmittedJobInfo > jobInfoList = new ArrayList < NotAdmittedJobInfo > ( infos . size ( ) ) ; AdmissionControlData admissionControlData = jobInitializer . getAdmissionControlData ( ) ; float averageWaitMsecsPerHardAdmissionJob = jobInitializer . getAverageWaitMsecsPerHardAdmissionJob ( ) ; for ( Map . Entry < JobInProgress , JobInfo > entry : infos . entrySet ( ) ) { JobInProgress job = entry . getKey ( ) ; JobInfo jobInfo = entry . getValue ( ) ; if ( ! jobInfo . needsInitializing ) { continue ; } String poolName = poolMgr . getPoolName ( job ) ; BlockedAdmissionReason reason = adjustClusterwideReason ( admissionControlData , jobInfo . reason , poolName ) ; jobInfoList . add ( new NotAdmittedJobInfo ( job . getStartTime ( ) , job . getJobID ( ) . toString ( ) , job . getJobConf ( ) . getUser ( ) , poolName , job . getPriority ( ) . toString ( ) , reason , jobInfo . reasonLimit , jobInfo . actualValue , jobInfo . hardAdmissionPosition , averageWaitMsecsPerHardAdmissionJob ) ) ; } return jobInfoList ; }
Get the jobs that were not admitted and all the info needed for display . The reasons of why the jobs were not admitted were set by the fair scheduler but will be adjusted when this method is called based on the current job initializer admission control data .
31,783
private synchronized String getJobNotAdmittedReason ( JobInProgress job , JobAdmissionWaitInfo waitInfo ) { JobInfo jobInfo = infos . get ( job ) ; if ( jobInfo == null ) { return "Unknown, can't find job" ; } AdmissionControlData admissionControlData = jobInitializer . getAdmissionControlData ( ) ; return NotAdmittedJobInfo . getReasoning ( adjustClusterwideReason ( admissionControlData , jobInfo . reason , poolMgr . getPoolName ( job ) ) , jobInfo . reasonLimit , jobInfo . actualValue , jobInfo . hardAdmissionPosition , waitInfo ) ; }
Get a stringified reason for not admitting a job .
31,784
private void dumpStatus ( long now ) { if ( now - lastDumpStatusTime < dumpStatusPeriod ) { return ; } lastDumpStatusTime = now ; logJobStats ( infos . keySet ( ) , TaskType . MAP ) ; logJobStats ( infos . keySet ( ) , TaskType . REDUCE ) ; dumpSpeculationStatus ( now ) ; }
Output some scheduling information to LOG
31,785
private void slowerButAccurateCountTasks ( JobInfo info , JobInProgress job ) { int totalMaps = job . numMapTasks ; int finishedMaps = 0 ; int runningMaps = 0 ; int runningMapTips = 0 ; for ( TaskInProgress tip : job . getTasks ( org . apache . hadoop . mapreduce . TaskType . MAP ) ) { if ( tip . isComplete ( ) ) { finishedMaps += 1 ; } else if ( tip . isRunning ( ) ) { runningMaps += tip . getActiveTasks ( ) . size ( ) ; runningMapTips += 1 ; } } info . totalInitedTasks = job . numMapTasks + job . numReduceTasks ; info . runningMaps = runningMaps ; infosummary . totalRunningMaps += runningMaps ; poolMgr . incRunningTasks ( info . poolName , TaskType . MAP , runningMaps ) ; info . neededSpeculativeMaps = taskSelector . neededSpeculativeMaps ( job ) ; info . neededMaps = ( totalMaps - runningMapTips - finishedMaps + info . neededSpeculativeMaps ) ; int totalReduces = job . numReduceTasks ; int finishedReduces = 0 ; int runningReduces = 0 ; int runningReduceTips = 0 ; for ( TaskInProgress tip : job . getTasks ( org . apache . hadoop . mapreduce . TaskType . REDUCE ) ) { if ( tip . isComplete ( ) ) { finishedReduces += 1 ; } else if ( tip . isRunning ( ) ) { runningReduces += tip . getActiveTasks ( ) . size ( ) ; runningReduceTips += 1 ; } } info . runningReduces = runningReduces ; infosummary . totalRunningReduces += runningReduces ; poolMgr . incRunningTasks ( info . poolName , TaskType . REDUCE , runningReduces ) ; if ( job . scheduleReduces ( ) ) { info . neededSpeculativeReduces = taskSelector . neededSpeculativeReduces ( job ) ; info . neededReduces = ( totalReduces - runningReduceTips - finishedReduces + info . neededSpeculativeReduces ) ; } else { info . neededReduces = 0 ; } }
Obtain task counts for a job by scanning all the tasks of all running jobs .
31,786
private void fifoWeightAdjust ( Pool pool ) { List < JobInProgress > jobs = new ArrayList < JobInProgress > ( ) ; jobs . addAll ( pool . getJobs ( ) ) ; Collections . sort ( jobs , fifoComparator ) ; double factor = 1.0 ; for ( JobInProgress job : jobs ) { JobInfo info = infos . get ( job ) ; if ( info == null ) { throw new IllegalStateException ( "Couldn't find job " + job . jobId + " in pool " + pool . getName ( ) ) ; } info . mapWeight *= factor ; info . reduceWeight *= factor ; factor *= FIFO_WEIGHT_DECAY_FACTOR ; } }
Boost the weight for the older jobs .
31,787
private boolean incSlotLimit ( JobInfo info , TaskType type , LimitType limit ) { switch ( limit ) { case MIN : if ( type == TaskType . MAP ) { if ( info . minMaps < runnableTasks ( info , type ) ) { info . minMaps += 1 ; return true ; } } else { if ( info . minReduces < runnableTasks ( info , type ) ) { info . minReduces += 1 ; return true ; } } return false ; case MAX : if ( type == TaskType . MAP ) { if ( info . maxMaps < runnableTasks ( info , type ) ) { info . maxMaps += 1 ; return true ; } } else { if ( info . maxReduces < runnableTasks ( info , type ) ) { info . maxReduces += 1 ; return true ; } } return false ; } return false ; }
Increment the slot limit of a job
31,788
private double computeShare ( JobInfo info , double w2sRatio , TaskType type , boolean considerMinMax ) { if ( ! isRunnable ( info ) ) { return 0 ; } double share = type == TaskType . MAP ? info . mapWeight : info . reduceWeight ; share *= w2sRatio ; if ( considerMinMax ) { int minSlots = type == TaskType . MAP ? info . minMaps : info . minReduces ; share = Math . max ( share , minSlots ) ; int maxSlots = type == TaskType . MAP ? info . maxMaps : info . maxReduces ; share = Math . min ( share , maxSlots ) ; } share = Math . min ( share , runnableTasks ( info , type ) ) ; return share ; }
Compute the number of slots assigned to a job given a particular weight - to - slot ratio w2sRatio .
31,789
protected int neededTasks ( JobInfo info , TaskType taskType ) { if ( info == null ) return 0 ; return taskType == TaskType . MAP ? info . neededMaps : info . neededReduces ; }
returning 0 s for jobs with no JobInfo present .
31,790
boolean isStarvedForMinShare ( JobInfo info , TaskType taskType ) { float starvingThreshold = ( float ) ( minTasks ( info , taskType ) * 0.9 ) ; return runningTasks ( info , taskType ) < starvingThreshold ; }
Is a job below 90% of its min share for the given task type?
31,791
protected void preemptTasksIfNecessary ( ) { if ( ! preemptionEnabled || jobComparator == JobComparator . FIFO ) return ; long curTime = clock . getTime ( ) ; if ( curTime - lastPreemptCheckTime < preemptionInterval ) return ; lastPreemptCheckTime = curTime ; int currentMaxPreemptibleTasks = maxPreemptibleTasks ; boolean currentCountNonPreemptibleTasks = countNonPreemptibleTasks ; synchronized ( taskTrackerManager ) { synchronized ( this ) { List < JobInProgress > jobs = new ArrayList < JobInProgress > ( infos . keySet ( ) ) ; for ( TaskType type : MAP_AND_REDUCE ) { int tasksToPreempt = 0 ; for ( JobInProgress job : jobs ) { if ( ! currentCountNonPreemptibleTasks && ! canBePreempted ( job ) ) { continue ; } tasksToPreempt += tasksToPreempt ( job , type , curTime ) ; } if ( tasksToPreempt > 0 ) { logJobStats ( sortedJobsByMapNeed , TaskType . MAP ) ; logJobStats ( sortedJobsByReduceNeed , TaskType . REDUCE ) ; } int actualTasksToPreempt = tasksToPreempt ; if ( ( currentMaxPreemptibleTasks >= 0 ) && ( tasksToPreempt > currentMaxPreemptibleTasks ) ) { actualTasksToPreempt = currentMaxPreemptibleTasks ; } LOG . info ( "preemptTasksIfNecessary: Should preempt " + tasksToPreempt + " " + type + " tasks, actually preempting " + actualTasksToPreempt + " tasks, countNonPreemptibleTasks = " + countNonPreemptibleTasks ) ; preemptTasks ( jobs , type , tasksToPreempt ) ; } } } }
Check for jobs that need tasks preempted either because they have been below their guaranteed share for their pool s preemptionTimeout or they have been below half their fair share for the fairSharePreemptionTimeout . If such jobs exist compute how many tasks of each type need to be preempted and then select the right ones using selectTasksToPreempt .
31,792
public String getDatanodeReport ( ) { StringBuffer buffer = new StringBuffer ( ) ; long c = getCapacity ( ) ; long r = getRemaining ( ) ; long u = getDfsUsed ( ) ; long nonDFSUsed = getNonDfsUsed ( ) ; float usedPercent = getDfsUsedPercent ( ) ; float remainingPercent = getRemainingPercent ( ) ; buffer . append ( "Name: " + name + "\n" ) ; if ( ! NetworkTopology . DEFAULT_RACK . equals ( location ) ) { buffer . append ( "Rack: " + location + "\n" ) ; } buffer . append ( "Decommission Status : " ) ; if ( isDecommissioned ( ) ) { buffer . append ( "Decommissioned\n" ) ; } else if ( isDecommissionInProgress ( ) ) { buffer . append ( "Decommission in progress\n" ) ; } else { buffer . append ( "Normal\n" ) ; } buffer . append ( "Configured Capacity: " + c + " (" + StringUtils . byteDesc ( c ) + ")" + "\n" ) ; buffer . append ( "DFS Used: " + u + " (" + StringUtils . byteDesc ( u ) + ")" + "\n" ) ; buffer . append ( "Non DFS Used: " + nonDFSUsed + " (" + StringUtils . byteDesc ( nonDFSUsed ) + ")" + "\n" ) ; buffer . append ( "DFS Remaining: " + r + "(" + StringUtils . byteDesc ( r ) + ")" + "\n" ) ; buffer . append ( "DFS Used%: " + StringUtils . limitDecimalTo2 ( usedPercent ) + "%\n" ) ; buffer . append ( "DFS Remaining%: " + StringUtils . limitDecimalTo2 ( remainingPercent ) + "%\n" ) ; buffer . append ( "Last contact: " + new Date ( lastUpdate ) + "\n" ) ; return buffer . toString ( ) ; }
A formatted string for reporting the status of the DataNode .
31,793
public String dumpDatanode ( ) { StringBuffer buffer = new StringBuffer ( ) ; long c = getCapacity ( ) ; long r = getRemaining ( ) ; long u = getDfsUsed ( ) ; buffer . append ( name ) ; if ( ! NetworkTopology . DEFAULT_RACK . equals ( location ) ) { buffer . append ( " " + location ) ; } if ( isDecommissioned ( ) ) { buffer . append ( " DD" ) ; } else if ( isDecommissionInProgress ( ) ) { buffer . append ( " DP" ) ; } else { buffer . append ( " IN" ) ; } buffer . append ( " " + c + "(" + StringUtils . byteDesc ( c ) + ")" ) ; buffer . append ( " " + u + "(" + StringUtils . byteDesc ( u ) + ")" ) ; buffer . append ( " " + StringUtils . limitDecimalTo2 ( ( ( 1.0 * u ) / c ) * 100 ) + "%" ) ; buffer . append ( " " + r + "(" + StringUtils . byteDesc ( r ) + ")" ) ; buffer . append ( " " + new Date ( lastUpdate ) ) ; return buffer . toString ( ) ; }
A formatted string for printing the status of the DataNode .
31,794
protected void setAdminState ( AdminStates newState ) { if ( newState == AdminStates . NORMAL ) { adminState = null ; } else { adminState = newState ; } }
Sets the admin state of this node .
31,795
private void parse ( String [ ] args ) { Options cliOpts = setupOptions ( ) ; BasicParser parser = new BasicParser ( ) ; CommandLine cl = null ; try { try { cl = parser . parse ( cliOpts , args ) ; } catch ( ParseException ex ) { throw new IllegalArgumentException ( "args = " + Arrays . toString ( args ) ) ; } int newThreshold = Integer . parseInt ( cl . getOptionValue ( "threshold" , "10" ) ) ; int iterationTime = Integer . parseInt ( cl . getOptionValue ( "iter_len" , String . valueOf ( maxIterationTime / ( 60 * 1000 ) ) ) ) ; maxConcurrentMoves = Integer . parseInt ( cl . getOptionValue ( "node_par_moves" , String . valueOf ( MAX_NUM_CONCURRENT_MOVES ) ) ) ; moveThreads = Integer . parseInt ( cl . getOptionValue ( "par_moves" , String . valueOf ( MOVER_THREAD_POOL_SIZE ) ) ) ; maxIterationTime = iterationTime * 60 * 1000L ; threshold = checkThreshold ( newThreshold ) ; System . out . println ( "Running with threshold of " + threshold + " and iteration time of " + maxIterationTime + " milliseconds" ) ; } catch ( RuntimeException e ) { printUsage ( cliOpts ) ; throw e ; } }
parse command line arguments
31,796
private long getLastTimeStamp ( ) { long result = - 1 ; if ( coronaReleaseFileCheck != null && ! coronaReleaseFileCheck . isEmpty ( ) ) { result = getLastTimeStamp ( new Path ( releasePath , coronaReleaseFileCheck ) ) ; if ( result > 0 ) { return result ; } } return getLastTimeStamp ( releasePath ) ; }
getLastStamp will go throught all the files and directories in the release directory and find the largest timestamp . This is used to check if there is any new release . RELEASE_COPY_PATTERN and CORONA_RELEASE_FILE_CHECK can be used to limit the files checked
31,797
private long getLastTimeStamp ( Path pathToCheck ) { long lastTimeStamp = - 1 ; long tmpTimeStamp = - 1 ; try { for ( FileStatus fileStat : fs . listStatus ( pathToCheck ) ) { Path srcPath = fileStat . getPath ( ) ; if ( ! fileStat . isDir ( ) ) { boolean checkFlag = true ; if ( release_pattern != null ) { Matcher m = release_pattern . matcher ( srcPath . toString ( ) ) ; if ( ! m . find ( ) ) { checkFlag = false ; } } if ( checkFlag ) { tmpTimeStamp = fileStat . getModificationTime ( ) ; } else { continue ; } } else { tmpTimeStamp = getLastTimeStamp ( srcPath ) ; } if ( tmpTimeStamp > lastTimeStamp ) { lastTimeStamp = tmpTimeStamp ; } } } catch ( IOException ioe ) { LOG . error ( "IOException when checking timestamp " , ioe ) ; } return lastTimeStamp ; }
Get the release directory s latest timestamp
31,798
private boolean copyRelease ( Path src , Path dest , boolean isTop , boolean isForced ) { try { if ( ! fs . exists ( dest ) ) { if ( ! fs . mkdirs ( dest ) ) { LOG . error ( "Unable to make dir " + dest . toString ( ) ) ; return false ; } } else { if ( isTop && ! isForced ) { Path donePath = new Path ( dest , RELEASE_TAG_FILE ) ; if ( fs . exists ( donePath ) ) { LOG . info ( donePath + " exists. There is no need to copy again" ) ; return true ; } } } for ( FileStatus fileStat : fs . listStatus ( src ) ) { Path srcPath = fileStat . getPath ( ) ; if ( ! fileStat . isDir ( ) ) { boolean copyFlag = true ; if ( release_pattern != null ) { Matcher m = release_pattern . matcher ( srcPath . toString ( ) ) ; if ( ! m . find ( ) ) { copyFlag = false ; } } if ( copyFlag ) { Path destPath = new Path ( dest , srcPath . getName ( ) ) ; fs . copyFromLocalFile ( srcPath , destPath ) ; } } else { Path destPath = new Path ( dest , srcPath . getName ( ) ) ; if ( ! copyRelease ( srcPath , destPath , false , isForced ) ) { LOG . error ( "Unable to create link for " + srcPath . toString ( ) + " as " + destPath . toString ( ) ) ; return false ; } } } if ( isTop ) { Path donePath = new Path ( dest , RELEASE_TAG_FILE ) ; FSDataOutputStream fos = fs . create ( donePath ) ; fos . close ( ) ; } } catch ( IOException ioe ) { LOG . error ( "IOException when link dir " , ioe ) ; return false ; } return true ; }
For every jar files from the source create a link in the dest
31,799
public void sessionEnd ( SessionStatus finishState ) { if ( sessionStatusToMetrics . containsKey ( finishState ) ) { sessionStatusToMetrics . get ( finishState ) . inc ( ) ; } else { throw new IllegalArgumentException ( "Invalid end state " + finishState ) ; } }
Record the end of a session .