idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
31,700 | static String getStatusCssClass ( DeviceTestResult testResult ) { String status ; switch ( testResult . getStatus ( ) ) { case PASS : status = "pass" ; break ; case IGNORED : status = "ignored" ; break ; case FAIL : status = "fail" ; break ; case ASSUMPTION_FAILURE : status = "assumption-violation" ; break ; default : ... | Convert a test result status into an HTML CSS class . |
31,701 | static IDevice obtainRealDevice ( AndroidDebugBridge adb , String serial ) { for ( IDevice adbDevice : adb . getDevices ( ) ) { if ( adbDevice . getSerialNumber ( ) . equals ( serial ) ) { return adbDevice ; } } throw new IllegalArgumentException ( "Unknown device serial: " + serial ) ; } | Fetch or create a real device that corresponds to a device model . |
31,702 | public static Set < String > findAllDevices ( AndroidDebugBridge adb , Integer minApiLevel ) { Set < String > devices = new LinkedHashSet < > ( ) ; for ( IDevice realDevice : adb . getDevices ( ) ) { if ( minApiLevel == null ) { devices . add ( realDevice . getSerialNumber ( ) ) ; } else { DeviceDetails deviceDetails =... | Find all device serials that are plugged in through ADB . |
31,703 | public final int next ( ) throws IOException { if ( m_nextException != null ) { throw m_nextException ; } try { return doNext ( ) ; } catch ( IOException e ) { m_nextException = e ; resetState ( ) ; throw e ; } } | Advances to the next tag . Once method returns END_DOCUMENT it always returns END_DOCUMENT . Once method throws an exception it always throws the same exception . |
31,704 | public final int getAttributeResourceID ( int index ) { int resourceIndex = getAttribute ( index ) . name ; if ( m_resourceIDs == null || resourceIndex < 0 || resourceIndex >= m_resourceIDs . length ) { return 0 ; } return m_resourceIDs [ resourceIndex ] ; } | Returns attribute resource ID . |
31,705 | private RemoteAndroidTestRunner createConfiguredRunner ( String testPackage , String testRunner , IDevice device ) throws Exception { RemoteAndroidTestRunner runner = new SpoonAndroidTestRunner ( instrumentationInfo . getApplicationPackage ( ) , testPackage , testRunner , device , clearAppDataBeforeEachTest , debug ) ;... | Create a configured Test Runner . This method adds sharding class name method name test size and coverage if available . |
31,706 | private void pullDeviceFiles ( IDevice device ) throws Exception { for ( String dir : DEVICE_DIRS ) { pullDirectory ( device , dir ) ; } } | Download all files from a single device to the local machine . |
31,707 | private void cleanData ( List < GalenTestInfo > testInfos ) { for ( GalenTestInfo testInfo : testInfos ) { if ( testInfo . getReport ( ) != null ) { try { FileTempStorage storage = testInfo . getReport ( ) . getFileStorage ( ) ; if ( storage != null ) { storage . cleanup ( ) ; } } catch ( Exception e ) { LOG . error ( ... | Removes temporary test data |
31,708 | public String evalStrictToString ( String script ) { Object returnedObject = context . evaluateString ( scope , script , "<cmd>" , 1 , null ) ; String unwrappedObject = unwrapProcessedObjectToString ( returnedObject ) ; if ( unwrappedObject != null ) { return unwrappedObject ; } else return "null" ; } | Used for processing js expressions in page spec reader . In case of failure throws an exception |
31,709 | private List < PageItemNode > restructurePageItems ( List < PageItem > items ) { List < PageItemNode > pins = items . stream ( ) . map ( PageItemNode :: new ) . collect ( toList ( ) ) ; for ( PageItemNode pinA : pins ) { for ( PageItemNode pinB : pins ) { if ( pinA != pinB ) { if ( isInside ( pinA . getPageItem ( ) . g... | Orders page items into a tree by their area . Tries to fit one item inside another |
31,710 | public void setObjects ( Map < String , Locator > objects ) { this . objects . clear ( ) ; if ( objects != null ) { this . objects . putAll ( objects ) ; } } | Clears current objects list and sets new object list |
31,711 | public void setObjectGroups ( Map < String , List < String > > objectGroups ) { this . objectGroups . clear ( ) ; if ( objectGroups != null ) { this . objectGroups . putAll ( objectGroups ) ; } } | Clears the current object groups list and sets new group list |
31,712 | public void setSections ( List < PageSection > sections ) { this . sections . clear ( ) ; if ( sections != null ) { this . sections . addAll ( sections ) ; } } | Clears the current root sections and copies new sections from given list |
31,713 | public List < String > findOnlyExistingMatchingObjectNames ( String objectExpression ) { String [ ] parts = objectExpression . split ( "," ) ; List < String > allSortedObjectNames = getSortedObjectNames ( ) ; List < String > resultingObjectNames = new LinkedList < > ( ) ; for ( String part : parts ) { String singleExpr... | Find all objects that match galen object statements |
31,714 | public List < String > getSortedObjectNames ( ) { List < String > list = new ArrayList < > ( getObjects ( ) . keySet ( ) ) ; Collections . sort ( list , new AlphanumericComparator ( ) ) ; return list ; } | Returns an alphanumericly sorted list of names of all declared objects |
31,715 | public List < String > findObjectsInGroup ( String groupName ) { if ( getObjectGroups ( ) . containsKey ( groupName ) ) { return getObjectGroups ( ) . get ( groupName ) ; } else { return Collections . emptyList ( ) ; } } | Find all objects belonging to a specific group |
31,716 | public void merge ( PageSpec spec ) { if ( spec == null ) { throw new IllegalArgumentException ( "Cannot merge null spec" ) ; } objects . putAll ( spec . getObjects ( ) ) ; sections . addAll ( spec . getSections ( ) ) ; objectGroups . putAll ( spec . getObjectGroups ( ) ) ; } | Merges all objects sections and objectGroups from spec |
31,717 | public void addSpec ( String sectionName , String objectName , String specText ) { PageSection pageSection = findSection ( sectionName ) ; if ( pageSection == null ) { pageSection = new PageSection ( sectionName ) ; sections . add ( pageSection ) ; } ObjectSpecs objectSpecs = new ObjectSpecs ( objectName ) ; objectSpec... | Parses the spec from specText and adds it to the page spec inside specified section . If section does not exit it will create it |
31,718 | public void cleanup ( ) { if ( this . childStorages != null ) { for ( FileTempStorage storage : this . childStorages ) { storage . cleanup ( ) ; } } for ( File file : this . files . values ( ) ) { FileUtils . deleteQuietly ( file ) ; } this . files . clear ( ) ; } | Removes all temporary files from disk . IMPORTANT! Use this call only in the end when you are sure you don t need report files anymore |
31,719 | public static BufferedImage resizeScreenshotIfNeeded ( WebDriver driver , BufferedImage screenshotImage ) { Double devicePixelRatio = 1.0 ; try { devicePixelRatio = ( ( Number ) ( ( JavascriptExecutor ) driver ) . executeScript ( JS_RETRIEVE_DEVICE_PIXEL_RATIO ) ) . doubleValue ( ) ; } catch ( Exception ex ) { ex . pri... | Check the devicePixelRatio and adapts the size of the screenshot as if the ratio was 1 . 0 |
31,720 | public int calculatePointOffsetDistance ( Point point ) { int right = left + width ; int bottom = top + height ; int pointLeft = point . getLeft ( ) ; int pointTop = point . getTop ( ) ; if ( contains ( point ) ) { return max ( top - pointTop , pointTop - bottom , left - pointLeft , pointLeft - right ) ; } else if ( is... | Calculates the distance of given point to one of the rect edges . If the point is located inside the return result will be negative . If the point is located on edge the result will be zero If the point is located outside of the rect - it will return positive value |
31,721 | public static LayoutReport checkPageSpecLayout ( WebDriver driver , PageSpec pageSpec , String [ ] includedTags , String [ ] excludedTags , String screenshotFilePath ) throws IOException { TestSession session = TestSession . current ( ) ; if ( session == null ) { throw new UnregisteredTestSession ( "Cannot check layout... | Used in GalenApi . js |
31,722 | protected String getReport ( ) { StringBuffer sb = new StringBuffer ( ) ; Iterator iter = this . longCounters . entrySet ( ) . iterator ( ) ; while ( iter . hasNext ( ) ) { Entry e = ( Entry ) iter . next ( ) ; sb . append ( e . getKey ( ) . toString ( ) ) . append ( "\t" ) . append ( e . getValue ( ) ) . append ( "\n"... | log the counters |
31,723 | public static String stringifySolution ( int width , int height , List < List < ColumnName > > solution ) { String [ ] [ ] picture = new String [ height ] [ width ] ; StringBuffer result = new StringBuffer ( ) ; for ( List < ColumnName > row : solution ) { Piece piece = null ; for ( ColumnName item : row ) { if ( item ... | Convert a solution to the puzzle returned by the model into a string that represents the placement of the pieces onto the board . |
31,724 | public SolutionCategory getCategory ( List < List < ColumnName > > names ) { Piece xPiece = null ; for ( Piece p : pieces ) { if ( "x" . equals ( p . name ) ) { xPiece = p ; break ; } } for ( List < ColumnName > row : names ) { if ( row . contains ( xPiece ) ) { int low_x = width ; int high_x = 0 ; int low_y = height ;... | Find whether the solution has the x in the upper left quadrant the x - midline the y - midline or in the center . |
31,725 | protected void initializePieces ( ) { pieces . add ( new Piece ( "x" , " x /xxx/ x " , false , oneRotation ) ) ; pieces . add ( new Piece ( "v" , "x /x /xxx" , false , fourRotations ) ) ; pieces . add ( new Piece ( "t" , "xxx/ x / x " , false , fourRotations ) ) ; pieces . add ( new Piece ( "w" , " x/ xx/xx " , fals... | Fill in the pieces list . |
31,726 | private static void generateRows ( DancingLinks dancer , Piece piece , int width , int height , boolean flip , boolean [ ] row , boolean upperLeft ) { int [ ] rotations = piece . getRotations ( ) ; for ( int rotIndex = 0 ; rotIndex < rotations . length ; ++ rotIndex ) { boolean [ ] [ ] shape = piece . getShape ( flip ,... | For a given piece generate all of the potential placements and add them as rows to the model . |
31,727 | public static void main ( String [ ] args ) { int width = 6 ; int height = 10 ; Pentomino model = new Pentomino ( width , height ) ; List splits = model . getSplits ( 2 ) ; for ( Iterator splitItr = splits . iterator ( ) ; splitItr . hasNext ( ) ; ) { int [ ] choices = ( int [ ] ) splitItr . next ( ) ; System . out . p... | Solve the 6x10 pentomino puzzle . |
31,728 | protected String constructQuery ( String table , String [ ] fieldNames ) { if ( fieldNames == null ) { throw new IllegalArgumentException ( "Field names may not be null" ) ; } StringBuilder query = new StringBuilder ( ) ; query . append ( "INSERT INTO " ) . append ( table ) ; if ( fieldNames . length > 0 && fieldNames ... | Constructs the query used as the prepared statement to insert data . |
31,729 | public static void setOutput ( JobConf job , String tableName , String ... fieldNames ) { job . setOutputFormat ( DBOutputFormat . class ) ; job . setReduceSpeculativeExecution ( false ) ; DBConfiguration dbConf = new DBConfiguration ( job ) ; dbConf . setOutputTableName ( tableName ) ; dbConf . setOutputFieldNames ( f... | Initializes the reduce - part of the job with the appropriate output settings |
31,730 | public List < String > getRacks ( ) { netlock . readLock ( ) . lock ( ) ; try { return new ArrayList < String > ( racks ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } } | Get a copy of the racks list . |
31,731 | public void add ( Node node ) { if ( node == null ) return ; if ( node instanceof InnerNode ) { throw new IllegalArgumentException ( "Not allow to add an inner node: " + NodeBase . getPath ( node ) ) ; } netlock . writeLock ( ) . lock ( ) ; try { Node rack = getNode ( node . getNetworkLocation ( ) ) ; if ( rack != null... | Add a leaf node Update node counter & rack counter if neccessary |
31,732 | public void remove ( Node node ) { if ( node == null ) return ; if ( node instanceof InnerNode ) { throw new IllegalArgumentException ( "Not allow to remove an inner node: " + NodeBase . getPath ( node ) ) ; } LOG . info ( "Removing a node: " + NodeBase . getPath ( node ) ) ; netlock . writeLock ( ) . lock ( ) ; try { ... | Remove a node Update node counter & rack counter if neccessary |
31,733 | public Node getNode ( String loc ) { netlock . readLock ( ) . lock ( ) ; try { loc = NodeBase . normalize ( loc ) ; if ( ! NodeBase . ROOT . equals ( loc ) ) loc = loc . substring ( 1 ) ; return clusterMap . getLoc ( loc ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } } | Given a string representation of a node return its reference |
31,734 | public List < Node > getDatanodesInRack ( String loc ) { netlock . readLock ( ) . lock ( ) ; try { loc = NodeBase . normalize ( loc ) ; if ( ! NodeBase . ROOT . equals ( loc ) ) loc = loc . substring ( 1 ) ; InnerNode rack = ( InnerNode ) clusterMap . getLoc ( loc ) ; if ( rack == null ) return null ; return new ArrayL... | Given a string representation of a rack return its children |
31,735 | public Set < String > getAllRacks ( ) { netlock . readLock ( ) . lock ( ) ; try { Set < String > result = new HashSet < String > ( this . racks ) ; result . addAll ( this . masterRacksSet ) ; return result ; } finally { netlock . readLock ( ) . unlock ( ) ; } } | Returns the set of racks |
31,736 | public int getDistance ( Node node1 , Node node2 ) { if ( node1 == node2 ) { return 0 ; } Node n1 = node1 , n2 = node2 ; int dis = 0 ; netlock . readLock ( ) . lock ( ) ; try { int level1 = node1 . getLevel ( ) , level2 = node2 . getLevel ( ) ; while ( n1 != null && level1 > level2 ) { n1 = n1 . getParent ( ) ; level1 ... | Return the distance between two nodes It is assumed that the distance from one node to its parent is 1 The distance between two nodes is calculated by summing up their distances to their closest common ancestor . |
31,737 | public boolean isOnSameRack ( Node node1 , Node node2 ) { if ( node1 == null || node2 == null ) { return false ; } netlock . readLock ( ) . lock ( ) ; try { return node1 . getParent ( ) == node2 . getParent ( ) ; } finally { netlock . readLock ( ) . unlock ( ) ; } } | Check if two nodes are on the same rack |
31,738 | public String chooseRack ( Set < String > excludedRacks ) { String chosenRack = null ; HashSet < Integer > chosenIndexes = new HashSet < Integer > ( ) ; netlock . readLock ( ) . lock ( ) ; try { int totalRacks = getNumOfRacks ( ) ; if ( totalRacks - excludedRacks . size ( ) <= 0 ) return null ; while ( true ) { int rac... | Choose a rack which is not in exlcudedRacks |
31,739 | public void run ( Context context ) throws IOException , InterruptedException { setup ( context ) ; while ( context . nextKeyValue ( ) ) { map ( context . getCurrentKey ( ) , context . getCurrentValue ( ) , context ) ; } cleanup ( context ) ; } | Expert users can override this method for more complete control over the execution of the Mapper . |
31,740 | private static int getBlockIdInFile ( DistributedFileSystem srcFs , Path srcPath , long blockId ) throws IOException { FileStatus srcStat = srcFs . getFileStatus ( srcPath ) ; LocatedBlocks lbs = srcFs . getClient ( ) . getLocatedBlocks ( srcPath . toUri ( ) . getPath ( ) , 0 , srcStat . getLen ( ) ) ; int i = 0 ; LOG ... | Get the index of a block in a file according to the blockId . |
31,741 | public static LocationPair getBlockLocation ( Codec codec , FileSystem srcFs , Path srcFile , int blockIdxInFile , Configuration conf , List < FileStatus > lfs ) throws IOException { int stripeIdx = 0 ; int blockIdxInStripe = 0 ; int blockIdx = blockIdxInFile ; if ( codec . isDirRaid ) { Path parentPath = srcFile . get... | Given a block in the file and specific codec return the LocationPair object which contains id of the stripe it belongs to and its location in the stripe |
31,742 | public void createPartControl ( Composite parent ) { Tree main = new Tree ( parent , SWT . SINGLE | SWT . FULL_SELECTION | SWT . H_SCROLL | SWT . V_SCROLL ) ; main . setHeaderVisible ( true ) ; main . setLinesVisible ( false ) ; main . setLayoutData ( new GridData ( GridData . FILL_BOTH ) ) ; TreeColumn serverCol = new... | Creates the columns for the view |
31,743 | public void skip ( K key ) throws IOException { if ( hasNext ( ) ) { while ( cmp . compare ( khead , key ) <= 0 && next ( ) ) ; } } | Skip key - value pairs with keys less than or equal to the key provided . |
31,744 | @ SuppressWarnings ( "unchecked" ) public void accept ( CompositeRecordReader . JoinCollector i , K key ) throws IOException { vjoin . clear ( ) ; if ( 0 == cmp . compare ( key , khead ) ) { do { vjoin . add ( vhead ) ; } while ( next ( ) && 0 == cmp . compare ( key , khead ) ) ; } i . add ( id , vjoin ) ; } | JoinCollector comes from parent which has |
31,745 | public boolean next ( K key , U value ) throws IOException { if ( hasNext ( ) ) { WritableUtils . cloneInto ( key , khead ) ; WritableUtils . cloneInto ( value , vhead ) ; next ( ) ; return true ; } return false ; } | Write key - value pair at the head of this stream to the objects provided ; get next key - value pair from proxied RR . |
31,746 | public static BlockPlacementPolicy getInstance ( Configuration conf , FSClusterStats stats , NetworkTopology clusterMap , HostsFileReader hostsReader , DNSToSwitchMapping dnsToSwitchMapping , FSNamesystem namesystem ) { Class < ? extends BlockPlacementPolicy > replicatorClass = conf . getClass ( "dfs.block.replicator.c... | Get an instance of the configured Block Placement Policy based on the value of the configuration paramater dfs . block . replicator . classname . |
31,747 | public static void setFilterClass ( Configuration conf , Class filterClass ) { conf . set ( FILTER_CLASS , filterClass . getName ( ) ) ; } | set the filter class |
31,748 | protected ExecutorService createExecutor ( ) { return Executors . newSingleThreadExecutor ( new ThreadFactoryBuilder ( ) . setDaemon ( true ) . setNameFormat ( "Logger channel to " + addr ) . setUncaughtExceptionHandler ( UncaughtExceptionHandlers . systemExit ( ) ) . build ( ) ) ; } | Separated out for easy overriding in tests . |
31,749 | private void heartbeatIfNecessary ( ) throws IOException { if ( lastHeartbeatStopwatch . elapsedMillis ( ) > HEARTBEAT_INTERVAL_MILLIS || ! lastHeartbeatStopwatch . isRunning ( ) ) { try { getProxy ( ) . heartbeat ( createReqInfo ( ) ) ; } finally { lastHeartbeatStopwatch . reset ( ) . start ( ) ; } } } | When we ve entered an out - of - sync state it s still useful to periodically send an empty RPC to the server such that it has the up to date committedTxId . This acts as a sanity check during recovery and also allows that node s metrics to be up - to - date about its lag . |
31,750 | public URL buildURLToFetchImage ( long txid ) { Preconditions . checkArgument ( txid >= - 1 , "Invalid segment: %s" , txid ) ; Preconditions . checkState ( httpPort != - 1 , "HTTP port not set yet" ) ; try { String path = GetJournalImageServlet . buildPath ( journalId , txid , nsInfo , true ) ; return new URL ( "http" ... | Build url to fetch image from the journal node to which this logger channel is attached . |
31,751 | public boolean moveAndDeleteRelativePath ( String volume , String pathName ) throws IOException { volume = normalizePath ( volume ) ; String newPathName = format . format ( new Date ( ) ) + "_" + uniqueId . getAndIncrement ( ) ; newPathName = TOBEDELETED + Path . SEPARATOR_CHAR + newPathName ; Path source = new Path ( ... | Move the path name on one volume to a temporary location and then delete them . |
31,752 | public boolean moveAndDeleteFromEachVolume ( String pathName ) throws IOException { boolean result = true ; for ( int i = 0 ; i < volumes . length ; i ++ ) { result = result && moveAndDeleteRelativePath ( volumes [ i ] , pathName ) ; } return result ; } | Move the path name on each volume to a temporary location and then delete them . |
31,753 | private static String getRelativePathName ( String absolutePathName , String volume ) { absolutePathName = normalizePath ( absolutePathName ) ; if ( ! absolutePathName . startsWith ( volume ) ) { return null ; } String fileName = absolutePathName . substring ( volume . length ( ) ) ; if ( fileName . charAt ( 0 ) == Pat... | Get the relative path name with respect to the root of the volume . |
31,754 | public boolean moveAndDeleteAbsolutePath ( String absolutePathName ) throws IOException { for ( int v = 0 ; v < volumes . length ; v ++ ) { String relative = getRelativePathName ( absolutePathName , volumes [ v ] ) ; if ( relative != null ) { return moveAndDeleteRelativePath ( volumes [ v ] , relative ) ; } } throw new... | Move the path name to a temporary location and then delete it . |
31,755 | public static String buildPath ( String journalId , long txid , NamespaceInfo nsInfo , boolean throttle ) { StringBuilder path = new StringBuilder ( "/getImage?getimage=1&" ) ; try { path . append ( JOURNAL_ID_PARAM ) . append ( "=" ) . append ( URLEncoder . encode ( journalId , "UTF-8" ) ) ; path . append ( "&" + TXID... | Build path to fetch image at given txid for the given journal . This path does not contain address . |
31,756 | public void set ( Writable obj ) { instance = obj ; Class < ? extends Writable > instanceClazz = instance . getClass ( ) ; Class < ? extends Writable > [ ] clazzes = getTypes ( ) ; for ( int i = 0 ; i < clazzes . length ; i ++ ) { Class < ? extends Writable > clazz = clazzes [ i ] ; if ( clazz . equals ( instanceClazz ... | Set the instance that is wrapped . |
31,757 | public RemoteEditLogManifest getEditLogManifest ( long firstTxId ) throws IOException { File currentDir = sd . getCurrentDir ( ) ; List < EditLogFile > allLogFiles = matchEditLogs ( FileUtil . listFiles ( currentDir ) ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( allLogFiles ) ; } List < RemoteEditLog > ret = new... | Find all editlog segments starting at or above the given txid . Include inprogress segments . Notice that the segments do not have to be contiguous . JournalSet handles the holes between segments . |
31,758 | public List < EditLogFile > getLogFiles ( long fromTxId , boolean enforceBoundary ) throws IOException { File currentDir = sd . getCurrentDir ( ) ; List < EditLogFile > allLogFiles = matchEditLogs ( currentDir . listFiles ( ) ) ; List < EditLogFile > logFiles = new ArrayList < EditLogFile > ( ) ; for ( EditLogFile elf ... | Get all edit log segments |
31,759 | private long findMaxTransaction ( ) throws IOException { for ( EditLogFile elf : getLogFiles ( 0 ) ) { if ( elf . isInProgress ( ) ) { maxSeenTransaction = Math . max ( elf . getFirstTxId ( ) , maxSeenTransaction ) ; } maxSeenTransaction = Math . max ( elf . getLastTxId ( ) , maxSeenTransaction ) ; } return maxSeenTran... | Find the maximum transaction in the journal . This gets stored in a member variable as corrupt edit logs will be moved aside but we still need to remember their first tranaction id in the case that it was the maximum transaction in the journal . |
31,760 | void init ( JobID jobId ) { this . startTime = JobTracker . getClock ( ) . getTime ( ) ; this . id = new TaskID ( jobId , isMapTask ( ) , partition ) ; this . skipping = startSkipping ( ) ; long speculativeDuration ; if ( isMapTask ( ) ) { this . speculativeLag = conf . getMapSpeculativeLag ( ) ; speculativeDuration = ... | Initialization common to Map and Reduce |
31,761 | public long getDispatchTime ( TaskAttemptID taskid ) { Long l = dispatchTimeMap . get ( taskid ) ; if ( l != null ) { return l . longValue ( ) ; } return 0 ; } | Return the dispatch time |
31,762 | public void setDispatchTime ( TaskAttemptID taskid , long disTime ) { dispatchTimeMap . put ( taskid , disTime ) ; this . lastDispatchTime = disTime ; } | Set the dispatch time |
31,763 | public boolean shouldClose ( TaskAttemptID taskid ) { if ( tasksReportedClosed . contains ( taskid ) ) { if ( tasksToKill . keySet ( ) . contains ( taskid ) ) return true ; else return false ; } boolean close = false ; TaskStatus ts = taskStatuses . get ( taskid ) ; if ( ( ts != null ) && ( ( this . failed ) || ( ( job... | Returns whether a component task - thread should be closed because the containing JobInProgress has completed or the task is killed by the user |
31,764 | synchronized TaskReport generateSingleReport ( ) { ArrayList < String > diagnostics = new ArrayList < String > ( ) ; for ( List < String > l : taskDiagnosticData . values ( ) ) { diagnostics . addAll ( l ) ; } TIPStatus currentStatus = null ; if ( isRunning ( ) && ! isComplete ( ) ) { currentStatus = TIPStatus . RUNNIN... | Creates a status report for this task . Includes the task ID and overall status plus reports for all the component task - threads that have ever been started . |
31,765 | public void addDiagnosticInfo ( TaskAttemptID taskId , String diagInfo ) { List < String > diagHistory = taskDiagnosticData . get ( taskId ) ; if ( diagHistory == null ) { diagHistory = new ArrayList < String > ( ) ; taskDiagnosticData . put ( taskId , diagHistory ) ; } diagHistory . add ( diagInfo ) ; } | Save diagnostic information for a given task . |
31,766 | public void incompleteSubTask ( TaskAttemptID taskid , JobStatus jobStatus ) { TaskStatus status = taskStatuses . get ( taskid ) ; String trackerName ; String trackerHostName = null ; TaskStatus . State taskState = TaskStatus . State . FAILED ; if ( status != null ) { trackerName = status . getTaskTracker ( ) ; tracker... | Indicate that one of the taskids in this TaskInProgress has failed . |
31,767 | public void completed ( TaskAttemptID taskid ) { completedTask ( taskid , TaskStatus . State . SUCCEEDED ) ; setSuccessfulTaskid ( taskid ) ; this . completes ++ ; this . execFinishTime = JobTracker . getClock ( ) . getTime ( ) ; recomputeProgress ( ) ; } | Indicate that one of the taskids in this TaskInProgress has successfully completed! |
31,768 | boolean killTask ( TaskAttemptID taskId , boolean shouldFail , String diagnosticInfo ) { TaskStatus st = taskStatuses . get ( taskId ) ; if ( st != null && ( st . getRunState ( ) == TaskStatus . State . RUNNING || st . getRunState ( ) == TaskStatus . State . COMMIT_PENDING || st . inTaskCleanupPhase ( ) || st . getRunS... | Kill the given task |
31,769 | boolean canBeSpeculated ( long currentTime ) { if ( skipping || ! isRunnable ( ) || ! isRunning ( ) || completes != 0 || isOnlyCommitPending ( ) || activeTasks . size ( ) > MAX_TASK_EXECS ) { if ( isMapTask ( ) ? job . shouldLogCannotspeculativeMaps ( ) : job . shouldLogCannotspeculativeReduces ( ) ) { LOG . info ( "Ta... | Can this task be speculated? This requires that it isn t done or almost done and that it isn t already being speculatively executed . |
31,770 | public Task getTaskToRun ( String taskTracker ) { TaskAttemptID taskid = null ; if ( nextTaskId < ( MAX_TASK_EXECS + maxTaskAttempts + numKilledTasks ) ) { int attemptId = job . getNumRestarts ( ) * NUM_ATTEMPTS_PER_RESTART + nextTaskId ; taskid = new TaskAttemptID ( id , attemptId ) ; ++ nextTaskId ; } else { LOG . wa... | Return a Task that can be sent to a TaskTracker for execution . |
31,771 | public Task addRunningTask ( TaskAttemptID taskid , String taskTracker , boolean taskCleanup ) { int numSlotsNeeded = taskCleanup ? 1 : numSlotsRequired ; Task t = null ; if ( isMapTask ( ) ) { LOG . debug ( "attempt " + numTaskFailures + " sending skippedRecords " + failedRanges . getIndicesCount ( ) ) ; String splitC... | Adds a previously running task to this tip . This is used in case of jobtracker restarts . |
31,772 | public boolean hasRunOnMachine ( String trackerHost , String trackerName ) { return this . activeTasks . values ( ) . contains ( trackerName ) || hasFailedOnMachine ( trackerHost ) ; } | Was this task ever scheduled to run on this machine? |
31,773 | public String getSplitNodes ( ) { if ( ! isMapTask ( ) || jobSetup || jobCleanup ) { return "" ; } String [ ] nodes = rawSplit . getLocations ( ) ; if ( nodes == null || nodes . length == 0 ) { return "" ; } StringBuffer ret = new StringBuffer ( nodes [ 0 ] ) ; for ( int i = 1 ; i < nodes . length ; i ++ ) { ret . appe... | Gets the Node list of input split locations sorted in rack order . |
31,774 | public void updateProgressRate ( long currentTime ) { double bestProgressRate = 0 ; for ( TaskStatus ts : taskStatuses . values ( ) ) { if ( ts . getRunState ( ) == TaskStatus . State . RUNNING || ts . getRunState ( ) == TaskStatus . State . SUCCEEDED || ts . getRunState ( ) == TaskStatus . State . COMMIT_PENDING ) { d... | update progress rate for a task |
31,775 | private void updateJobStats ( Phase phase , ProcessingRates oldRates , ProcessingRates newRates ) { DataStatistics stats = job . getRunningTaskStatistics ( phase ) ; stats . updateStatistics ( oldRates . getRate ( phase ) , newRates . getRate ( phase ) ) ; } | Helper function that updates the processing rates stats for this job . Only updates the rate in the corresponding phase . |
31,776 | private int getAvailableSlots ( TaskTrackerStatus tts , TaskType type ) { return getMaxSlots ( tts , type ) - occupiedSlotsAfterHeartbeat ( tts , type ) ; } | Obtain the how many more slots can be scheduled on this tasktracker |
31,777 | private int occupiedSlotsAfterHeartbeat ( TaskTrackerStatus tts , TaskType type ) { int occupied = ( type == TaskType . MAP ) ? tts . countOccupiedMapSlots ( ) - tts . getMapsReleased ( ) : tts . countOccupiedReduceSlots ( ) - tts . getReducesReleased ( ) ; return occupied ; } | Obtain the number of occupied slots after the scheduled kills are done |
31,778 | private void updateLocalityWaitTimes ( long currentTime ) { long timeSinceLastHeartbeat = ( lastHeartbeatTime == 0 ? 0 : currentTime - lastHeartbeatTime ) ; lastHeartbeatTime = currentTime ; for ( JobInfo info : infos . values ( ) ) { if ( info . skippedAtLastHeartbeat ) { info . timeWaitedForLocalMap += timeSinceLastH... | Update locality wait times for jobs that were skipped at last heartbeat . |
31,779 | private void updateLastMapLocalityLevel ( JobInProgress job , Task mapTaskLaunched , TaskTrackerStatus tracker ) { JobInfo info = infos . get ( job ) ; LocalityLevel localityLevel = localManager . taskToLocalityLevel ( job , mapTaskLaunched , tracker ) ; info . lastMapLocalityLevel = localityLevel ; info . timeWaitedFo... | Update a job s locality level and locality wait variables given that that it has just launched a map task on a given task tracker . |
31,780 | protected void update ( ) { ClusterStatus clusterStatus = taskTrackerManager . getClusterStatus ( ) ; if ( autoComputeLocalityDelay ) { JobTracker jobTracker = ( JobTracker ) taskTrackerManager ; localityDelayNodeLocal = Math . min ( MAX_AUTOCOMPUTED_LOCALITY_DELAY , ( long ) ( 1.5 * jobTracker . getNextHeartbeatInterv... | Recompute the internal variables used by the scheduler - per - job weights fair shares deficits minimum slot allocations and numbers of running and needed tasks of each type . |
31,781 | private BlockedAdmissionReason adjustClusterwideReason ( AdmissionControlData admissionControlData , BlockedAdmissionReason originalReason , String poolName ) { BlockedAdmissionReason clusterwideReason = ( BlockedAdmissionReason . underClusterwideAdmissionControl ( admissionControlData . getSoftTaskLimit ( ) , admissio... | Based on the original reason and admission control data adjust the reason this job is not admitted if any . |
31,782 | synchronized Collection < NotAdmittedJobInfo > getNotAdmittedJobs ( ) { List < NotAdmittedJobInfo > jobInfoList = new ArrayList < NotAdmittedJobInfo > ( infos . size ( ) ) ; AdmissionControlData admissionControlData = jobInitializer . getAdmissionControlData ( ) ; float averageWaitMsecsPerHardAdmissionJob = jobInitiali... | Get the jobs that were not admitted and all the info needed for display . The reasons of why the jobs were not admitted were set by the fair scheduler but will be adjusted when this method is called based on the current job initializer admission control data . |
31,783 | private synchronized String getJobNotAdmittedReason ( JobInProgress job , JobAdmissionWaitInfo waitInfo ) { JobInfo jobInfo = infos . get ( job ) ; if ( jobInfo == null ) { return "Unknown, can't find job" ; } AdmissionControlData admissionControlData = jobInitializer . getAdmissionControlData ( ) ; return NotAdmittedJ... | Get a stringified reason for not admitting a job . |
31,784 | private void dumpStatus ( long now ) { if ( now - lastDumpStatusTime < dumpStatusPeriod ) { return ; } lastDumpStatusTime = now ; logJobStats ( infos . keySet ( ) , TaskType . MAP ) ; logJobStats ( infos . keySet ( ) , TaskType . REDUCE ) ; dumpSpeculationStatus ( now ) ; } | Output some scheduling information to LOG |
31,785 | private void slowerButAccurateCountTasks ( JobInfo info , JobInProgress job ) { int totalMaps = job . numMapTasks ; int finishedMaps = 0 ; int runningMaps = 0 ; int runningMapTips = 0 ; for ( TaskInProgress tip : job . getTasks ( org . apache . hadoop . mapreduce . TaskType . MAP ) ) { if ( tip . isComplete ( ) ) { fin... | Obtain task counts for a job by scanning all the tasks of all running jobs . |
31,786 | private void fifoWeightAdjust ( Pool pool ) { List < JobInProgress > jobs = new ArrayList < JobInProgress > ( ) ; jobs . addAll ( pool . getJobs ( ) ) ; Collections . sort ( jobs , fifoComparator ) ; double factor = 1.0 ; for ( JobInProgress job : jobs ) { JobInfo info = infos . get ( job ) ; if ( info == null ) { thro... | Boost the weight for the older jobs . |
31,787 | private boolean incSlotLimit ( JobInfo info , TaskType type , LimitType limit ) { switch ( limit ) { case MIN : if ( type == TaskType . MAP ) { if ( info . minMaps < runnableTasks ( info , type ) ) { info . minMaps += 1 ; return true ; } } else { if ( info . minReduces < runnableTasks ( info , type ) ) { info . minRedu... | Increment the slot limit of a job |
31,788 | private double computeShare ( JobInfo info , double w2sRatio , TaskType type , boolean considerMinMax ) { if ( ! isRunnable ( info ) ) { return 0 ; } double share = type == TaskType . MAP ? info . mapWeight : info . reduceWeight ; share *= w2sRatio ; if ( considerMinMax ) { int minSlots = type == TaskType . MAP ? info ... | Compute the number of slots assigned to a job given a particular weight - to - slot ratio w2sRatio . |
31,789 | protected int neededTasks ( JobInfo info , TaskType taskType ) { if ( info == null ) return 0 ; return taskType == TaskType . MAP ? info . neededMaps : info . neededReduces ; } | Get the number of needed tasks of the given type for a job , returning 0 s for jobs with no JobInfo present .
31,790 | boolean isStarvedForMinShare ( JobInfo info , TaskType taskType ) { float starvingThreshold = ( float ) ( minTasks ( info , taskType ) * 0.9 ) ; return runningTasks ( info , taskType ) < starvingThreshold ; } | Is a job below 90% of its min share for the given task type? |
31,791 | protected void preemptTasksIfNecessary ( ) { if ( ! preemptionEnabled || jobComparator == JobComparator . FIFO ) return ; long curTime = clock . getTime ( ) ; if ( curTime - lastPreemptCheckTime < preemptionInterval ) return ; lastPreemptCheckTime = curTime ; int currentMaxPreemptibleTasks = maxPreemptibleTasks ; boole... | Check for jobs that need tasks preempted either because they have been below their guaranteed share for their pool s preemptionTimeout or they have been below half their fair share for the fairSharePreemptionTimeout . If such jobs exist compute how many tasks of each type need to be preempted and then select the right ... |
31,792 | public String getDatanodeReport ( ) { StringBuffer buffer = new StringBuffer ( ) ; long c = getCapacity ( ) ; long r = getRemaining ( ) ; long u = getDfsUsed ( ) ; long nonDFSUsed = getNonDfsUsed ( ) ; float usedPercent = getDfsUsedPercent ( ) ; float remainingPercent = getRemainingPercent ( ) ; buffer . append ( "Name... | A formatted string for reporting the status of the DataNode . |
31,793 | public String dumpDatanode ( ) { StringBuffer buffer = new StringBuffer ( ) ; long c = getCapacity ( ) ; long r = getRemaining ( ) ; long u = getDfsUsed ( ) ; buffer . append ( name ) ; if ( ! NetworkTopology . DEFAULT_RACK . equals ( location ) ) { buffer . append ( " " + location ) ; } if ( isDecommissioned ( ) ) { b... | A formatted string for printing the status of the DataNode . |
31,794 | protected void setAdminState ( AdminStates newState ) { if ( newState == AdminStates . NORMAL ) { adminState = null ; } else { adminState = newState ; } } | Sets the admin state of this node . |
31,795 | private void parse ( String [ ] args ) { Options cliOpts = setupOptions ( ) ; BasicParser parser = new BasicParser ( ) ; CommandLine cl = null ; try { try { cl = parser . parse ( cliOpts , args ) ; } catch ( ParseException ex ) { throw new IllegalArgumentException ( "args = " + Arrays . toString ( args ) ) ; } int newT... | parse command line arguments |
31,796 | private long getLastTimeStamp ( ) { long result = - 1 ; if ( coronaReleaseFileCheck != null && ! coronaReleaseFileCheck . isEmpty ( ) ) { result = getLastTimeStamp ( new Path ( releasePath , coronaReleaseFileCheck ) ) ; if ( result > 0 ) { return result ; } } return getLastTimeStamp ( releasePath ) ; } | getLastTimeStamp will go through all the files and directories in the release directory and find the largest timestamp . This is used to check if there is any new release . RELEASE_COPY_PATTERN and CORONA_RELEASE_FILE_CHECK can be used to limit the files checked
31,797 | private long getLastTimeStamp ( Path pathToCheck ) { long lastTimeStamp = - 1 ; long tmpTimeStamp = - 1 ; try { for ( FileStatus fileStat : fs . listStatus ( pathToCheck ) ) { Path srcPath = fileStat . getPath ( ) ; if ( ! fileStat . isDir ( ) ) { boolean checkFlag = true ; if ( release_pattern != null ) { Matcher m = ... | Get the release directory s latest timestamp |
31,798 | private boolean copyRelease ( Path src , Path dest , boolean isTop , boolean isForced ) { try { if ( ! fs . exists ( dest ) ) { if ( ! fs . mkdirs ( dest ) ) { LOG . error ( "Unable to make dir " + dest . toString ( ) ) ; return false ; } } else { if ( isTop && ! isForced ) { Path donePath = new Path ( dest , RELEASE_T... | For every jar files from the source create a link in the dest |
31,799 | public void sessionEnd ( SessionStatus finishState ) { if ( sessionStatusToMetrics . containsKey ( finishState ) ) { sessionStatusToMetrics . get ( finishState ) . inc ( ) ; } else { throw new IllegalArgumentException ( "Invalid end state " + finishState ) ; } } | Record the end of a session . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.