idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
32,300 | public ClusterNode getRunnableNode ( RequestedNode requestedNode , LocalityLevel maxLevel , ResourceType type , Set < String > excluded ) { ClusterNode node = null ; RunnableIndices r = typeToIndices . get ( type ) ; node = r . getRunnableNodeForHost ( requestedNode ) ; if ( maxLevel == LocalityLevel . NODE || node != null ) { return node ; } node = r . getRunnableNodeForRack ( requestedNode , excluded ) ; if ( maxLevel == LocalityLevel . RACK || node != null ) { return node ; } node = r . getRunnableNodeForAny ( excluded ) ; return node ; } | Get a runnable node . |
32,301 | protected void addNode ( ClusterNode node , Map < ResourceType , String > resourceInfos ) { synchronized ( node ) { nameToNode . put ( node . getName ( ) , node ) ; faultManager . addNode ( node . getName ( ) , resourceInfos . keySet ( ) ) ; nameToApps . put ( node . getName ( ) , resourceInfos ) ; hostsToSessions . put ( node , new HashSet < String > ( ) ) ; clusterManager . getMetrics ( ) . restartTaskTracker ( 1 ) ; setAliveDeadMetrics ( ) ; for ( Map . Entry < ResourceType , RunnableIndices > entry : typeToIndices . entrySet ( ) ) { ResourceType type = entry . getKey ( ) ; if ( resourceInfos . containsKey ( type ) ) { if ( node . checkForGrant ( Utilities . getUnitResourceRequest ( type ) , resourceLimit ) ) { RunnableIndices r = entry . getValue ( ) ; r . addRunnable ( node ) ; } } } } } | Add a node to be managed . |
32,302 | private void updateRunnability ( ClusterNode node ) { synchronized ( node ) { for ( Map . Entry < ResourceType , RunnableIndices > entry : typeToIndices . entrySet ( ) ) { ResourceType type = entry . getKey ( ) ; RunnableIndices r = entry . getValue ( ) ; ResourceRequest unitReq = Utilities . getUnitResourceRequest ( type ) ; boolean currentlyRunnable = r . hasRunnable ( node ) ; boolean shouldBeRunnable = node . checkForGrant ( unitReq , resourceLimit ) ; if ( currentlyRunnable && ! shouldBeRunnable ) { LOG . info ( "Node " + node . getName ( ) + " is no longer " + type + " runnable" ) ; r . deleteRunnable ( node ) ; } else if ( ! currentlyRunnable && shouldBeRunnable ) { LOG . info ( "Node " + node . getName ( ) + " is now " + type + " runnable" ) ; r . addRunnable ( node ) ; } } } } | Update the runnable status of a node based on resources available . This checks both resources and slot availability . |
32,303 | protected void addAppToNode ( ClusterNode node , ResourceType type , String appInfo ) { synchronized ( node ) { Map < ResourceType , String > apps = nameToApps . get ( node . getName ( ) ) ; apps . put ( type , appInfo ) ; for ( Map . Entry < ResourceType , RunnableIndices > entry : typeToIndices . entrySet ( ) ) { if ( type . equals ( entry . getKey ( ) ) ) { if ( node . checkForGrant ( Utilities . getUnitResourceRequest ( type ) , resourceLimit ) ) { RunnableIndices r = entry . getValue ( ) ; r . addRunnable ( node ) ; } } } } } | Register a new application on the node |
32,304 | public Set < String > getNodeSessions ( String nodeName ) { ClusterNode node = nameToNode . get ( nodeName ) ; if ( node == null ) { LOG . warn ( "Trying to get the sessions for a non-existent node " + nodeName ) ; return new HashSet < String > ( ) ; } synchronized ( node ) { return new HashSet < String > ( hostsToSessions . get ( node ) ) ; } } | Get all the sessions that have grants on the node |
32,305 | public void deleteSession ( String session ) { for ( Set < String > sessions : hostsToSessions . values ( ) ) { sessions . remove ( session ) ; } } | Remove the references to the session |
32,306 | public void cancelGrant ( String nodeName , String sessionId , int requestId ) { ClusterNode node = nameToNode . get ( nodeName ) ; if ( node == null ) { LOG . warn ( "Canceling grant for non-existent node: " + nodeName ) ; return ; } synchronized ( node ) { if ( node . deleted ) { LOG . warn ( "Canceling grant for deleted node: " + nodeName ) ; return ; } String hoststr = node . getClusterNodeInfo ( ) . getAddress ( ) . getHost ( ) ; if ( ! canAllowNode ( hoststr ) ) { LOG . warn ( "Canceling grant for excluded node: " + hoststr ) ; return ; } ResourceRequestInfo req = node . getRequestForGrant ( sessionId , requestId ) ; if ( req != null ) { ResourceRequest unitReq = Utilities . getUnitResourceRequest ( req . getType ( ) ) ; boolean previouslyRunnable = node . checkForGrant ( unitReq , resourceLimit ) ; node . cancelGrant ( sessionId , requestId ) ; loadManager . decrementLoad ( req . getType ( ) ) ; if ( ! previouslyRunnable && node . checkForGrant ( unitReq , resourceLimit ) ) { RunnableIndices r = typeToIndices . get ( req . getType ( ) ) ; if ( ! faultManager . isBlacklisted ( node . getName ( ) , req . getType ( ) ) ) { r . addRunnable ( node ) ; } } } } } | Cancel grant on a node |
32,307 | public boolean addGrant ( ClusterNode node , String sessionId , ResourceRequestInfo req ) { synchronized ( node ) { if ( node . deleted ) { return false ; } if ( ! node . checkForGrant ( Utilities . getUnitResourceRequest ( req . getType ( ) ) , resourceLimit ) ) { return false ; } node . addGrant ( sessionId , req ) ; loadManager . incrementLoad ( req . getType ( ) ) ; hostsToSessions . get ( node ) . add ( sessionId ) ; if ( ! node . checkForGrant ( Utilities . getUnitResourceRequest ( req . getType ( ) ) , resourceLimit ) ) { RunnableIndices r = typeToIndices . get ( req . getType ( ) ) ; r . deleteRunnable ( node ) ; } } return true ; } | Add a grant to a node |
32,308 | public void restoreAfterSafeModeRestart ( ) throws IOException { if ( ! clusterManager . safeMode ) { throw new IOException ( "restoreAfterSafeModeRestart() called while the " + "Cluster Manager was not in Safe Mode" ) ; } for ( ClusterNode clusterNode : nameToNode . values ( ) ) { restoreClusterNode ( clusterNode ) ; } for ( ClusterNode clusterNode : nameToNode . values ( ) ) { for ( ResourceRequestInfo resourceRequestInfo : clusterNode . grants . values ( ) ) { restoreResourceRequestInfo ( resourceRequestInfo ) ; loadManager . incrementLoad ( resourceRequestInfo . getType ( ) ) ; } } } | This method rebuilds members related to the NodeManager instance which were not directly persisted themselves . |
32,309 | public void restoreResourceRequestInfo ( ResourceRequestInfo resourceRequestInfo ) { List < RequestedNode > requestedNodes = null ; List < String > hosts = resourceRequestInfo . getHosts ( ) ; if ( hosts != null && hosts . size ( ) > 0 ) { requestedNodes = new ArrayList < RequestedNode > ( hosts . size ( ) ) ; for ( String host : hosts ) { requestedNodes . add ( resolve ( host , resourceRequestInfo . getType ( ) ) ) ; } } resourceRequestInfo . nodes = requestedNodes ; } | This method rebuilds members related to a ResourceRequestInfo instance which were not directly persisted themselves . |
32,310 | public boolean heartbeat ( ClusterNodeInfo clusterNodeInfo ) throws DisallowedNode { ClusterNode node = nameToNode . get ( clusterNodeInfo . name ) ; if ( ! canAllowNode ( clusterNodeInfo . getAddress ( ) . getHost ( ) ) ) { if ( node != null ) { node . heartbeat ( clusterNodeInfo ) ; } else { throw new DisallowedNode ( clusterNodeInfo . getAddress ( ) . getHost ( ) ) ; } return false ; } boolean newNode = false ; Map < ResourceType , String > currentResources = clusterNodeInfo . getResourceInfos ( ) ; if ( currentResources == null ) { currentResources = new EnumMap < ResourceType , String > ( ResourceType . class ) ; } if ( node == null ) { LOG . info ( "Adding node with heartbeat: " + clusterNodeInfo . toString ( ) ) ; node = new ClusterNode ( clusterNodeInfo , topologyCache . getNode ( clusterNodeInfo . address . host ) , cpuToResourcePartitioning ) ; addNode ( node , currentResources ) ; newNode = true ; } node . heartbeat ( clusterNodeInfo ) ; boolean appsChanged = false ; Map < ResourceType , String > prevResources = nameToApps . get ( clusterNodeInfo . name ) ; Set < ResourceType > deletedApps = null ; for ( Map . Entry < ResourceType , String > entry : prevResources . entrySet ( ) ) { String newAppInfo = currentResources . get ( entry . getKey ( ) ) ; String oldAppInfo = entry . getValue ( ) ; if ( newAppInfo == null || ! newAppInfo . equals ( oldAppInfo ) ) { if ( deletedApps == null ) { deletedApps = EnumSet . noneOf ( ResourceType . class ) ; } deletedApps . add ( entry . getKey ( ) ) ; appsChanged = true ; } } Map < ResourceType , String > addedApps = null ; for ( Map . Entry < ResourceType , String > entry : currentResources . entrySet ( ) ) { String newAppInfo = entry . getValue ( ) ; String oldAppInfo = prevResources . get ( entry . getKey ( ) ) ; if ( oldAppInfo == null || ! oldAppInfo . equals ( newAppInfo ) ) { if ( addedApps == null ) { addedApps = new EnumMap < ResourceType , String > ( ResourceType . class ) ; } addedApps . put ( entry . getKey ( ) , entry . getValue ( ) ) ; appsChanged = true ; } } if ( deletedApps != null ) { for ( ResourceType deleted : deletedApps ) { clusterManager . nodeAppRemoved ( clusterNodeInfo . name , deleted ) ; } } if ( addedApps != null ) { for ( Map . Entry < ResourceType , String > added : addedApps . entrySet ( ) ) { addAppToNode ( node , added . getKey ( ) , added . getValue ( ) ) ; } } updateRunnability ( node ) ; return newNode || appsChanged ; } | return true if a new node has been added - else return false |
32,311 | public String getAppInfo ( ClusterNode node , ResourceType type ) { Map < ResourceType , String > resourceInfos = nameToApps . get ( node . getName ( ) ) ; if ( resourceInfos == null ) { return null ; } else { return resourceInfos . get ( type ) ; } } | Get information about applications running on a node . |
32,312 | public int getAllocatedCpuForType ( ResourceType type ) { int total = 0 ; for ( ClusterNode node : nameToNode . values ( ) ) { synchronized ( node ) { if ( node . deleted ) { continue ; } total += node . getAllocatedCpuForType ( type ) ; } } return total ; } | Find allocation for a resource type . |
32,313 | public List < String > getFreeNodesForType ( ResourceType type ) { ArrayList < String > freeNodes = new ArrayList < String > ( ) ; for ( Map . Entry < String , ClusterNode > entry : nameToNode . entrySet ( ) ) { ClusterNode node = entry . getValue ( ) ; synchronized ( node ) { if ( ! node . deleted && node . getMaxCpuForType ( type ) > node . getAllocatedCpuForType ( type ) ) { freeNodes . add ( entry . getKey ( ) + ": " + node . getFree ( ) . toString ( ) ) ; } } } return freeNodes ; } | Get a list nodes with free Cpu for a resource type |
32,314 | public void nodeFeedback ( String handle , List < ResourceType > resourceTypes , List < NodeUsageReport > reportList ) { for ( NodeUsageReport usageReport : reportList ) { faultManager . nodeFeedback ( usageReport . getNodeName ( ) , resourceTypes , usageReport ) ; } } | Process feedback about nodes . |
32,315 | void blacklistNode ( String nodeName , ResourceType resourceType ) { LOG . info ( "Node " + nodeName + " has been blacklisted for resource " + resourceType ) ; clusterManager . getMetrics ( ) . setBlacklistedNodes ( faultManager . getBlacklistedNodeCount ( ) ) ; deleteAppFromNode ( nodeName , resourceType ) ; } | Blacklist a resource on a node . |
32,316 | public RequestedNode resolve ( String host , ResourceType type ) { RunnableIndices indices = typeToIndices . get ( type ) ; return indices . getOrCreateRequestedNode ( host ) ; } | Resolve a host name . |
32,317 | public void resetNodesLastHeartbeatTime ( ) { long now = ClusterManager . clock . getTime ( ) ; for ( ClusterNode node : nameToNode . values ( ) ) { node . lastHeartbeatTime = now ; } } | This is required when we come out of safe mode and we need to reset the lastHeartbeatTime for each node |
32,318 | public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeFieldName ( "nameToNode" ) ; jsonGenerator . writeStartObject ( ) ; for ( Map . Entry < String , ClusterNode > entry : nameToNode . entrySet ( ) ) { jsonGenerator . writeFieldName ( entry . getKey ( ) ) ; entry . getValue ( ) . write ( jsonGenerator ) ; } jsonGenerator . writeEndObject ( ) ; Map < String , Set < String > > hostsToSessionsMap = new HashMap < String , Set < String > > ( ) ; for ( Map . Entry < ClusterNode , Set < String > > entry : hostsToSessions . entrySet ( ) ) { hostsToSessionsMap . put ( entry . getKey ( ) . getName ( ) , entry . getValue ( ) ) ; } jsonGenerator . writeObjectField ( "hostsToSessions" , hostsToSessionsMap ) ; jsonGenerator . writeObjectField ( "nameToApps" , nameToApps ) ; jsonGenerator . writeEndObject ( ) ; } | This method writes the state of the NodeManager to disk |
32,319 | public void setDelHints ( String delHints ) { if ( delHints == null || delHints . isEmpty ( ) ) { throw new IllegalArgumentException ( "DelHints is empty" ) ; } this . delHints = delHints ; } | Set this block s delHints |
32,320 | @ SuppressWarnings ( "unchecked" ) public void run ( RecordReader < K1 , V1 > input , OutputCollector < K2 , V2 > output , Reporter reporter ) throws IOException { Application < K1 , V1 , K2 , V2 > application = null ; try { RecordReader < FloatWritable , NullWritable > fakeInput = ( ! Submitter . getIsJavaRecordReader ( job ) && ! Submitter . getIsJavaMapper ( job ) ) ? ( RecordReader < FloatWritable , NullWritable > ) input : null ; application = new Application < K1 , V1 , K2 , V2 > ( job , fakeInput , output , reporter , ( Class < ? extends K2 > ) job . getOutputKeyClass ( ) , ( Class < ? extends V2 > ) job . getOutputValueClass ( ) ) ; } catch ( InterruptedException ie ) { throw new RuntimeException ( "interrupted" , ie ) ; } DownwardProtocol < K1 , V1 > downlink = application . getDownlink ( ) ; boolean isJavaInput = Submitter . getIsJavaRecordReader ( job ) ; downlink . runMap ( reporter . getInputSplit ( ) , job . getNumReduceTasks ( ) , isJavaInput ) ; boolean skipping = job . getBoolean ( "mapred.skip.on" , false ) ; try { if ( isJavaInput ) { K1 key = input . createKey ( ) ; V1 value = input . createValue ( ) ; downlink . setInputTypes ( key . getClass ( ) . getName ( ) , value . getClass ( ) . getName ( ) ) ; while ( input . next ( key , value ) ) { downlink . mapItem ( key , value ) ; if ( skipping ) { downlink . flush ( ) ; } } downlink . endOfInput ( ) ; } application . waitForFinish ( ) ; } catch ( Throwable t ) { application . abort ( t ) ; } finally { application . cleanup ( ) ; } } | Run the map task . |
32,321 | static void printUsage ( ) { System . err . println ( "Usage: DFSck <path> [-list-corruptfileblocks | " + "[-move | -delete | -openforwrite ] " + "[-files [-blocks [-locations | -racks]]]] " + "[-limit <limit>] [-service serviceName]" + "[-(zero/one)]" ) ; System . err . println ( "\t<path>\tstart checking from this path" ) ; System . err . println ( "\t-move\tmove corrupted files to /lost+found" ) ; System . err . println ( "\t-delete\tdelete corrupted files" ) ; System . err . println ( "\t-files\tprint out files being checked" ) ; System . err . println ( "\t-openforwrite\tprint out files opened for write" ) ; System . err . println ( "\t-list-corruptfileblocks\tprint out list of missing " + "blocks and files they belong to" ) ; System . err . println ( "\t-blocks\tprint out block report" ) ; System . err . println ( "\t-locations\tprint out locations for every block" ) ; System . err . println ( "\t-racks\tprint out network topology for data-node locations" ) ; System . err . println ( "\t-limit\tlimit output to <limit> corrupt files. " + "The default value of the limit is 500." ) ; System . err . println ( "\t\tBy default fsck ignores files opened for write, " + "use -openforwrite to report such files. They are usually " + " tagged CORRUPT or HEALTHY depending on their block " + "allocation status" ) ; ToolRunner . printGenericCommandUsage ( System . err ) ; } | Print fsck usage information |
32,322 | private Integer listCorruptFileBlocks ( String dir , int limit , String baseUrl ) throws IOException { int errCode = - 1 ; int numCorrupt = 0 ; int cookie = 0 ; String lastBlock = null ; final String noCorruptLine = "has no CORRUPT files" ; final String noMoreCorruptLine = "has no more CORRUPT files" ; final String cookiePrefix = "Cookie:" ; boolean allDone = false ; while ( ! allDone ) { final StringBuffer url = new StringBuffer ( baseUrl ) ; if ( cookie > 0 ) { url . append ( "&startblockafterIndex=" ) . append ( String . valueOf ( cookie ) ) ; } else if ( lastBlock != null ) { url . append ( "&startblockafter=" ) . append ( lastBlock ) ; } URL path = new URL ( url . toString ( ) ) ; URLConnection connection = path . openConnection ( ) ; InputStream stream = connection . getInputStream ( ) ; BufferedReader input = new BufferedReader ( new InputStreamReader ( stream , "UTF-8" ) ) ; try { String line = null ; while ( ( line = input . readLine ( ) ) != null ) { if ( line . startsWith ( cookiePrefix ) ) { try { cookie = Integer . parseInt ( line . split ( "\t" ) [ 1 ] ) ; } catch ( Exception e ) { allDone = true ; break ; } continue ; } if ( ( line . endsWith ( noCorruptLine ) ) || ( line . endsWith ( noMoreCorruptLine ) ) || ( line . endsWith ( NamenodeFsck . HEALTHY_STATUS ) ) || ( line . endsWith ( NamenodeFsck . NONEXISTENT_STATUS ) ) || numCorrupt >= limit ) { allDone = true ; break ; } if ( ( line . isEmpty ( ) ) || ( line . startsWith ( "FSCK started by" ) ) || ( line . startsWith ( "Unable to locate any corrupt files under" ) ) || ( line . startsWith ( "The filesystem under path" ) ) ) continue ; numCorrupt ++ ; if ( numCorrupt == 1 ) { out . println ( "The list of corrupt files under path '" + dir + "' are:" ) ; } out . println ( line ) ; try { lastBlock = line . split ( "\t" ) [ 0 ] ; } catch ( Exception e ) { allDone = true ; break ; } } } finally { input . close ( ) ; } } out . println ( "The filesystem under path '" + dir + "' has " + numCorrupt + " CORRUPT files" ) ; if ( numCorrupt == 0 ) errCode = 0 ; return errCode ; } | To get the list we need to call iteratively until the server says there is no more left . |
32,323 | private static void updateConfKeys ( Configuration conf , String suffix , String nameserviceId ) { String value = conf . get ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY + suffix + ( nameserviceId . isEmpty ( ) ? "" : ( "." + nameserviceId ) ) ) ; if ( value != null ) { conf . set ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY , value ) ; } } | For federated and avatar clusters we need update the http key . |
32,324 | private static boolean optionExist ( String args [ ] , String opt ) { for ( String arg : args ) { if ( arg . equalsIgnoreCase ( opt ) ) { return true ; } } return false ; } | Check if the option exist in the given arguments . |
32,325 | public static int parseHashType ( String name ) { if ( "jenkins" . equalsIgnoreCase ( name ) ) { return JENKINS_HASH ; } else if ( "murmur" . equalsIgnoreCase ( name ) ) { return MURMUR_HASH ; } else { return INVALID_HASH ; } } | This utility method converts String representation of hash function name to a symbolic constant . Currently two function types are supported jenkins and murmur . |
32,326 | public static Hash getInstance ( int type ) { switch ( type ) { case JENKINS_HASH : return JenkinsHash . getInstance ( ) ; case MURMUR_HASH : return MurmurHash . getInstance ( ) ; default : return null ; } } | Get a singleton instance of hash function of a given type . |
32,327 | public static int getVersion ( URI editsURI ) throws IOException { if ( editsURI . getScheme ( ) . equals ( NNStorage . LOCAL_URI_SCHEME ) ) { StorageDirectory sd = new NNStorage ( new StorageInfo ( ) ) . new StorageDirectory ( new File ( editsURI . getPath ( ) ) ) ; File versionFile = sd . getVersionFile ( ) ; if ( ! versionFile . exists ( ) ) { throw new IOException ( "No VERSION file in: " + editsURI + "version file: " + versionFile ) ; } Properties props = Storage . getProps ( versionFile ) ; String layout = props . getProperty ( Storage . LAYOUT_VERSION ) ; if ( layout == null ) { throw new IOException ( "No layout version in: " + editsURI ) ; } return Integer . valueOf ( layout ) ; } else { throw new IOException ( "Non file journals not supported yet." ) ; } } | Read version file from the given directory and return the layout stored therein . |
32,328 | public static File uriToFile ( URI u ) throws IOException { if ( ! u . getScheme ( ) . equals ( NNStorage . LOCAL_URI_SCHEME ) ) { throw new IOException ( "URI does not represent a file" ) ; } return new File ( u . getPath ( ) ) ; } | Get file associated with the given URI |
32,329 | public static List < String > getAllAncestors ( String eventPath ) { if ( eventPath == null || ! eventPath . startsWith ( Path . SEPARATOR ) ) { return null ; } if ( eventPath . equals ( Path . SEPARATOR ) ) { return Arrays . asList ( Path . SEPARATOR ) ; } List < String > ancestors = new ArrayList < String > ( ) ; while ( eventPath . length ( ) > 0 ) { ancestors . add ( eventPath ) ; eventPath = eventPath . substring ( 0 , eventPath . lastIndexOf ( Path . SEPARATOR ) ) ; } ancestors . add ( Path . SEPARATOR ) ; return ancestors ; } | return all the ancestors of the given path include itself . |
32,330 | synchronized public String addJob ( Job aJob ) { String id = this . getNextJobID ( ) ; aJob . setJobID ( id ) ; aJob . setState ( Job . WAITING ) ; this . addToQueue ( aJob ) ; return id ; } | Add a new job . |
32,331 | private synchronized void updateBlockInfo ( LogEntry e ) { BlockScanInfo info = blockMap . get ( new Block ( e . blockId , 0 , e . genStamp ) ) ; if ( info != null && e . verificationTime > 0 && info . lastScanTime < e . verificationTime ) { delBlockInfo ( info ) ; info . lastScanTime = e . verificationTime ; info . lastScanType = ScanType . VERIFICATION_SCAN ; addBlockInfo ( info ) ; } } | Update blockMap by the given LogEntry |
32,332 | synchronized void addBlock ( Block block ) { if ( ! isInitialized ( ) ) { return ; } BlockScanInfo info = blockMap . get ( block ) ; if ( info != null ) { LOG . warn ( "Adding an already existing block " + block ) ; delBlockInfo ( info ) ; } info = new BlockScanInfo ( block ) ; info . lastScanTime = getNewBlockScanTime ( ) ; addBlockInfo ( info ) ; adjustThrottler ( ) ; } | Adds block to list of blocks |
32,333 | synchronized void deleteBlock ( Block block ) { if ( ! isInitialized ( ) ) { return ; } BlockScanInfo info = blockMap . get ( block ) ; if ( info != null ) { delBlockInfo ( info ) ; } } | Deletes the block from internal structures |
32,334 | private void verifyFirstBlock ( ) { BlockScanInfo block = null ; synchronized ( this ) { if ( blockInfoSet . size ( ) > 0 ) { block = blockInfoSet . first ( ) ; } } if ( block != null ) { verifyBlock ( block ) ; processedBlocks . add ( block . block . getBlockId ( ) ) ; } } | Picks one block and verifies it |
32,335 | private static void checkSrcPath ( Configuration conf , List < Path > srcPaths ) throws IOException { List < IOException > rslt = new ArrayList < IOException > ( ) ; List < Path > unglobbed = new LinkedList < Path > ( ) ; for ( Path p : srcPaths ) { FileSystem fs = p . getFileSystem ( conf ) ; FileStatus [ ] inputs = fs . globStatus ( p ) ; if ( inputs != null && inputs . length > 0 ) { for ( FileStatus onePath : inputs ) { unglobbed . add ( onePath . getPath ( ) ) ; } } else { rslt . add ( new IOException ( "Input source " + p + " does not exist." ) ) ; } } if ( ! rslt . isEmpty ( ) ) { throw new InvalidInputException ( rslt ) ; } srcPaths . clear ( ) ; srcPaths . addAll ( unglobbed ) ; } | Sanity check for srcPath |
32,336 | public static DistCopier getCopier ( final Configuration conf , final Arguments args ) throws IOException { DistCopier dc = new DistCopier ( conf , args ) ; dc . setupJob ( ) ; if ( dc . getJobConf ( ) != null ) { return dc ; } else { return null ; } } | Return a DistCopier object for copying the files . |
32,337 | static void copy ( final Configuration conf , final Arguments args ) throws IOException { DistCopier copier = getCopier ( conf , args ) ; if ( copier != null ) { try { JobClient client = copier . getJobClient ( ) ; RunningJob job = client . submitJob ( copier . getJobConf ( ) ) ; try { if ( ! client . monitorAndPrintJob ( copier . getJobConf ( ) , job ) ) { throw new IOException ( "Job failed!" ) ; } } catch ( InterruptedException ie ) { Thread . currentThread ( ) . interrupt ( ) ; } copier . finalizeCopiedFiles ( ) ; } finally { copier . cleanupJob ( ) ; } } } | Driver to copy srcPath to destPath depending on required protocol . |
32,338 | public int run ( String [ ] args ) { try { copy ( conf , Arguments . valueOf ( args , conf ) ) ; return 0 ; } catch ( IllegalArgumentException e ) { System . err . println ( StringUtils . stringifyException ( e ) + "\n" + usage ) ; ToolRunner . printGenericCommandUsage ( System . err ) ; return - 1 ; } catch ( InvalidInputException e ) { System . err . println ( StringUtils . stringifyException ( e ) + "\n" ) ; return - 1 ; } catch ( DuplicationException e ) { System . err . println ( StringUtils . stringifyException ( e ) ) ; return DuplicationException . ERROR_CODE ; } catch ( RemoteException e ) { final IOException unwrapped = e . unwrapRemoteException ( FileNotFoundException . class , AccessControlException . class , QuotaExceededException . class ) ; System . err . println ( StringUtils . stringifyException ( unwrapped ) ) ; return - 3 ; } catch ( Exception e ) { System . err . println ( "With failures, global counters are inaccurate; " + "consider running with -i" ) ; System . err . println ( "Copy failed: " + StringUtils . stringifyException ( e ) ) ; return - 999 ; } } | This is the main driver for recursively copying directories across file systems . It takes at least two cmdline parameters . A source URL and a destination URL . It then essentially does an ls - lR on the source URL and writes the output in a round - robin manner to all the map input files . The mapper actually copies the files allotted to it . The reduce is empty . |
32,339 | static String makeRelative ( Path root , Path absPath ) { if ( ! absPath . isAbsolute ( ) ) { throw new IllegalArgumentException ( "!absPath.isAbsolute(), absPath=" + absPath ) ; } String p = absPath . toUri ( ) . getPath ( ) ; StringTokenizer pathTokens = new StringTokenizer ( p , "/" ) ; for ( StringTokenizer rootTokens = new StringTokenizer ( root . toUri ( ) . getPath ( ) , "/" ) ; rootTokens . hasMoreTokens ( ) ; ) { if ( ! rootTokens . nextToken ( ) . equals ( pathTokens . nextToken ( ) ) ) { return null ; } } StringBuilder sb = new StringBuilder ( ) ; for ( ; pathTokens . hasMoreTokens ( ) ; ) { sb . append ( pathTokens . nextToken ( ) ) ; if ( pathTokens . hasMoreTokens ( ) ) { sb . append ( Path . SEPARATOR ) ; } } return sb . length ( ) == 0 ? "." : sb . toString ( ) ; } | Make a path relative with respect to a root path . absPath is always assumed to descend from root . Otherwise returned path is null . |
32,340 | static void fullyDelete ( String dir , Configuration conf ) throws IOException { if ( dir != null ) { Path tmp = new Path ( dir ) ; tmp . getFileSystem ( conf ) . delete ( tmp , true ) ; } } | Fully delete dir |
32,341 | private static int setReducerCount ( long fileCount , JobConf job , JobClient client ) { int numReducers = Math . max ( job . getInt ( MAX_REDUCE_LABEL , MAX_REDUCERS_DEFAULT ) , ( int ) ( fileCount / job . getInt ( MAX_FILES_PER_REDUCER_LABEL , MAX_FILES_PER_REDUCER_DEFAULT ) ) ) ; numReducers = Math . max ( numReducers , 1 ) ; job . setNumReduceTasks ( numReducers ) ; return numReducers ; } | Calculate how many reducers to run . |
32,342 | static private boolean sameFile ( FileSystem srcfs , FileStatus srcstatus , FileSystem dstfs , Path dstpath , boolean skipCRCCheck ) throws IOException { FileStatus dststatus ; try { dststatus = dstfs . getFileStatus ( dstpath ) ; } catch ( FileNotFoundException fnfe ) { return false ; } if ( srcstatus . getLen ( ) != dststatus . getLen ( ) ) { return false ; } if ( skipCRCCheck ) { LOG . debug ( "Skipping CRC Check" ) ; return true ; } final FileChecksum srccs ; try { srccs = srcfs . getFileChecksum ( srcstatus . getPath ( ) ) ; } catch ( FileNotFoundException fnfe ) { return true ; } try { final FileChecksum dstcs = dstfs . getFileChecksum ( dststatus . getPath ( ) ) ; return srccs == null || dstcs == null || srccs . equals ( dstcs ) ; } catch ( FileNotFoundException fnfe ) { return false ; } } | Check whether the contents of src and dst are the same . |
32,343 | static private boolean isAncestorPath ( String x , String y ) { if ( ! y . startsWith ( x ) ) { return false ; } final int len = x . length ( ) ; return y . length ( ) == len || y . charAt ( len ) == Path . SEPARATOR_CHAR ; } | is x an ancestor path of y? |
32,344 | static private void checkDuplication ( FileSystem fs , Path file , Path sorted , Configuration conf ) throws IOException { SequenceFile . Reader in = null ; try { SequenceFile . Sorter sorter = new SequenceFile . Sorter ( fs , new Text . Comparator ( ) , Text . class , Text . class , conf ) ; sorter . sort ( file , sorted ) ; in = new SequenceFile . Reader ( fs , sorted , conf ) ; Text prevdst = null , curdst = new Text ( ) ; Text prevsrc = null , cursrc = new Text ( ) ; for ( ; in . next ( curdst , cursrc ) ; ) { if ( prevdst != null && curdst . equals ( prevdst ) ) { throw new DuplicationException ( "Invalid input, there are duplicated files in the sources: " + prevsrc + ", " + cursrc ) ; } prevdst = curdst ; curdst = new Text ( ) ; prevsrc = cursrc ; cursrc = new Text ( ) ; } } finally { checkAndClose ( in ) ; } } | Check whether the file list have duplication . |
32,345 | List < Job > getRemainingJobs ( ) { if ( mThread . isAlive ( ) ) { LOG . warn ( "Internal error: Polling running monitor for jobs" ) ; } synchronized ( mJobs ) { return new ArrayList < Job > ( mJobs ) ; } } | If shutdown before all jobs have completed any still - running jobs may be extracted from the component . |
32,346 | private static LocatedBlock convertLocatedBlock ( TLocatedBlock tblk ) { TBlock one = tblk . block ; Block hblock = new Block ( one . getBlockId ( ) , one . getNumBytes ( ) , one . getGenerationStamp ( ) ) ; List < TDatanodeID > locs = tblk . location ; DatanodeInfo [ ] dn = new DatanodeInfo [ locs . size ( ) ] ; for ( int j = 0 ; j < dn . length ; j ++ ) { String name = locs . get ( j ) . name ; dn [ j ] = new DatanodeInfo ( new DatanodeID ( name , "" , - 1 , getPort ( name ) ) ) ; } return new LocatedBlock ( hblock , dn ) ; } | Converts a TLocatedBlock to a LocatedBlock |
32,347 | private static Block convertBlock ( TBlock tblk ) { return new Block ( tblk . getBlockId ( ) , tblk . getNumBytes ( ) , tblk . getGenerationStamp ( ) ) ; } | Converts a TBlock to a Block |
32,348 | private ServerSocket createServerSocket ( int port ) throws IOException { try { ServerSocket sock = new ServerSocket ( ) ; sock . setReuseAddress ( true ) ; if ( port == 0 ) { sock . bind ( null ) ; serverPort = sock . getLocalPort ( ) ; } else { sock . bind ( new InetSocketAddress ( port ) ) ; } return sock ; } catch ( IOException ioe ) { throw new IOException ( "Could not create ServerSocket on port " + port + "." + ioe ) ; } } | Bind to port . If the specified port is 0 then bind to random port . |
32,349 | public void setArgs ( String args ) { for ( String s : args . trim ( ) . split ( "\\s*,\\s*" ) ) argv . add ( s ) ; } | Sets the argument list from a String of comma - separated values . |
32,350 | public void setOut ( String outprop ) { this . outprop = outprop ; out = new ByteArrayOutputStream ( ) ; if ( outprop . equals ( errprop ) ) err = out ; } | Sets the property into which System . out will be written . |
32,351 | public void setErr ( String errprop ) { this . errprop = errprop ; err = ( errprop . equals ( outprop ) ) ? err = out : new ByteArrayOutputStream ( ) ; } | Sets the property into which System . err will be written . If this property has the same name as the property for System . out the two will be interlaced . |
32,352 | protected void pushContext ( ) { antOut = System . out ; antErr = System . err ; System . setOut ( new PrintStream ( out ) ) ; System . setErr ( out == err ? System . out : new PrintStream ( err ) ) ; } | Save the current values of System . out System . err and configure output streams for FsShell . |
32,353 | protected void popContext ( ) { if ( outprop != null && ! System . out . checkError ( ) ) getProject ( ) . setNewProperty ( outprop , out . toString ( ) ) ; if ( out != err && errprop != null && ! System . err . checkError ( ) ) getProject ( ) . setNewProperty ( errprop , err . toString ( ) ) ; System . setErr ( antErr ) ; System . setOut ( antOut ) ; confloader . cleanup ( ) ; confloader . setParent ( null ) ; } | Create the appropriate output properties with their respective output restore System . out System . err and release any resources from created ClassLoaders to aid garbage collection . |
32,354 | private void printConf ( PrintWriter out , Reconfigurable reconf ) { Configuration oldConf = reconf . getConf ( ) ; Configuration newConf = new Configuration ( ) ; if ( reconf instanceof ReconfigurableBase ) ( ( ReconfigurableBase ) reconf ) . preProcessConfiguration ( newConf ) ; Collection < ReconfigurationUtil . PropertyChange > changes = ReconfigurationUtil . getChangedProperties ( newConf , oldConf ) ; boolean changeOK = true ; out . println ( "<form action=\"\" method=\"post\">" ) ; out . println ( "<table border=\"1\">" ) ; out . println ( "<tr><th>Property</th><th>Old value</th>" ) ; out . println ( "<th>New value </th></tr>" ) ; for ( ReconfigurationUtil . PropertyChange c : changes ) { out . print ( "<tr><td>" ) ; if ( ! reconf . isPropertyReconfigurable ( c . prop ) ) { out . print ( "<font color=\"red\">" + StringEscapeUtils . escapeHtml ( c . prop ) + "</font>" ) ; changeOK = false ; } else { out . print ( StringEscapeUtils . escapeHtml ( c . prop ) ) ; out . print ( "<input type=\"hidden\" name=\"" + StringEscapeUtils . escapeHtml ( c . prop ) + "\" value=\"" + StringEscapeUtils . escapeHtml ( c . newVal ) + "\"/>" ) ; } out . print ( "</td><td>" + ( c . oldVal == null ? "<it>default</it>" : StringEscapeUtils . escapeHtml ( c . oldVal ) ) + "</td><td>" + ( c . newVal == null ? "<it>default</it>" : StringEscapeUtils . escapeHtml ( c . newVal ) ) + "</td>" ) ; out . print ( "</tr>\n" ) ; } out . println ( "</table>" ) ; if ( ! changeOK ) { out . println ( "<p><font color=\"red\">WARNING: properties marked red" + " will not be changed until the next restart.</font></p>" ) ; } out . println ( "<input type=\"submit\" value=\"Apply\" />" ) ; out . println ( "</form>" ) ; } | Print configuration options that can be changed . |
32,355 | private void applyChanges ( PrintWriter out , Reconfigurable reconf , HttpServletRequest req ) throws IOException , ReconfigurationException { Configuration oldConf = reconf . getConf ( ) ; Configuration newConf = new Configuration ( ) ; if ( reconf instanceof ReconfigurableBase ) ( ( ReconfigurableBase ) reconf ) . preProcessConfiguration ( newConf ) ; Enumeration < String > params = getParams ( req ) ; synchronized ( oldConf ) { while ( params . hasMoreElements ( ) ) { String rawParam = params . nextElement ( ) ; String param = StringEscapeUtils . unescapeHtml ( rawParam ) ; String value = StringEscapeUtils . unescapeHtml ( req . getParameter ( rawParam ) ) ; if ( reconf instanceof ReconfigurableBase ) param = ( ( ReconfigurableBase ) reconf ) . preProcessKey ( param ) ; if ( value != null ) { if ( value . equals ( newConf . getRaw ( param ) ) || value . equals ( "default" ) || value . equals ( "null" ) || value . equals ( "" ) ) { if ( ( value . equals ( "default" ) || value . equals ( "null" ) || value . equals ( "" ) ) && oldConf . getRaw ( param ) != null ) { out . println ( "<p>Changed \"" + StringEscapeUtils . escapeHtml ( param ) + "\" from \"" + StringEscapeUtils . escapeHtml ( oldConf . getRaw ( param ) ) + "\" to default</p>" ) ; reconf . reconfigureProperty ( param , null ) ; } else if ( ! value . equals ( "default" ) && ! value . equals ( "null" ) && ! value . equals ( "" ) && ( oldConf . getRaw ( param ) == null || ! oldConf . getRaw ( param ) . equals ( value ) ) ) { if ( oldConf . getRaw ( param ) == null ) { out . println ( "<p>Changed \"" + StringEscapeUtils . escapeHtml ( param ) + "\" from default to \"" + StringEscapeUtils . escapeHtml ( value ) + "\"</p>" ) ; } else { out . println ( "<p>Changed \"" + StringEscapeUtils . escapeHtml ( param ) + "\" from \"" + StringEscapeUtils . escapeHtml ( oldConf . getRaw ( param ) ) + "\" to \"" + StringEscapeUtils . escapeHtml ( value ) + "\"</p>" ) ; } reconf . reconfigureProperty ( param , value ) ; } else { LOG . info ( "property " + param + " unchanged" ) ; } } else { out . println ( "<p>\"" + StringEscapeUtils . escapeHtml ( param ) + "\" not changed because value has changed from \"" + StringEscapeUtils . escapeHtml ( value ) + "\" to \"" + StringEscapeUtils . escapeHtml ( newConf . getRaw ( param ) ) + "\" since approval</p>" ) ; } } } } } | Apply configuration changes after admin has approved them . |
32,356 | public static boolean isCompatibleClientProtocol ( long clientVersion , long serverVersion ) { return clientVersion == serverVersion || ( ( clientVersion == ClientProtocol . OPTIMIZE_FILE_STATUS_VERSION - 1 || clientVersion == ClientProtocol . OPTIMIZE_FILE_STATUS_VERSION || clientVersion == ClientProtocol . ITERATIVE_LISTING_VERSION || clientVersion == ClientProtocol . BULK_BLOCK_LOCATIONS_VERSION || clientVersion == ClientProtocol . CONCAT_VERSION || clientVersion == ClientProtocol . LIST_CORRUPT_FILEBLOCKS_VERSION || clientVersion == ClientProtocol . SAVENAMESPACE_FORCE || clientVersion == ClientProtocol . RECOVER_LEASE_VERSION || clientVersion == ClientProtocol . CLOSE_RECOVER_LEASE_VERSION ) && ( serverVersion == ClientProtocol . OPTIMIZE_FILE_STATUS_VERSION - 1 || serverVersion == ClientProtocol . OPTIMIZE_FILE_STATUS_VERSION || serverVersion == ClientProtocol . ITERATIVE_LISTING_VERSION || serverVersion == ClientProtocol . BULK_BLOCK_LOCATIONS_VERSION || serverVersion == ClientProtocol . CONCAT_VERSION || serverVersion == ClientProtocol . LIST_CORRUPT_FILEBLOCKS_VERSION || serverVersion == ClientProtocol . SAVENAMESPACE_FORCE || serverVersion == ClientProtocol . RECOVER_LEASE_VERSION || serverVersion == ClientProtocol . CLOSE_RECOVER_LEASE_VERSION ) ) ; } | Check if the client and NameNode have compatible ClientProtocol versions |
32,357 | public static boolean isCompatibleClientDatanodeProtocol ( long clientVersion , long serverVersion ) { return clientVersion == serverVersion || ( ( clientVersion == ClientDatanodeProtocol . GET_BLOCKINFO_VERSION - 1 || clientVersion == ClientDatanodeProtocol . GET_BLOCKINFO_VERSION || clientVersion == ClientDatanodeProtocol . COPY_BLOCK_VERSION ) && ( serverVersion == ClientDatanodeProtocol . GET_BLOCKINFO_VERSION - 1 || serverVersion == ClientDatanodeProtocol . GET_BLOCKINFO_VERSION || serverVersion == ClientDatanodeProtocol . COPY_BLOCK_VERSION ) ) ; } | Check if the client and DataNode have compatible ClientDataNodeProtocol versions |
32,358 | public void stop ( ) { if ( stopRequested ) { return ; } stopRequested = true ; running = false ; if ( server != null ) server . stop ( ) ; if ( triggerThread != null ) triggerThread . interrupt ( ) ; if ( fileFixer != null ) fileFixer . shutdown ( ) ; if ( fileFixerThread != null ) fileFixerThread . interrupt ( ) ; if ( myMetrics != null ) { myMetrics . shutdown ( ) ; } } | Stop all HighTideNode threads and wait for all to finish . |
32,359 | void shutdown ( ) throws IOException , InterruptedException { configMgr . stopReload ( ) ; fileFixer . shutdown ( ) ; fileFixerThread . interrupt ( ) ; server . stop ( ) ; } | Shuts down the HighTideNode |
32,360 | public static HighTideNode createHighTideNode ( String argv [ ] , Configuration conf ) throws IOException { if ( conf == null ) { conf = new Configuration ( ) ; } StartupOption startOpt = parseArguments ( argv ) ; if ( startOpt == null ) { printUsage ( ) ; return null ; } setStartupOption ( conf , startOpt ) ; HighTideNode node = new HighTideNode ( conf ) ; return node ; } | Create an instance of the HighTideNode |
32,361 | public static Path [ ] stat2Paths ( FileStatus [ ] stats ) { if ( stats == null ) return null ; Path [ ] ret = new Path [ stats . length ] ; for ( int i = 0 ; i < stats . length ; ++ i ) { ret [ i ] = stats [ i ] . getPath ( ) ; } return ret ; } | convert an array of FileStatus to an array of Path |
32,362 | public static Path [ ] stat2Paths ( FileStatus [ ] stats , Path path ) { if ( stats == null ) return new Path [ ] { path } ; else return stat2Paths ( stats ) ; } | convert an array of FileStatus to an array of Path . If stats if null return path |
32,363 | public static boolean fullyDelete ( File dir ) throws IOException { boolean deleted = true ; File contents [ ] = dir . listFiles ( ) ; if ( contents != null ) { for ( int i = 0 ; i < contents . length ; i ++ ) { if ( contents [ i ] . isFile ( ) ) { if ( ! contents [ i ] . delete ( ) ) { deleted = false ; } } else { boolean b = false ; b = contents [ i ] . delete ( ) ; if ( b ) { continue ; } if ( ! fullyDelete ( contents [ i ] ) ) { deleted = false ; } } } } return dir . delete ( ) && deleted ; } | Delete a directory and all its contents . If we return false the directory may be partially - deleted . |
32,364 | public static void fullyDelete ( FileSystem fs , Path dir ) throws IOException { fs . delete ( dir , true ) ; } | Recursively delete a directory . |
32,365 | public static boolean copy ( FileSystem srcFS , Path src , FileSystem dstFS , Path dst , boolean deleteSource , Configuration conf ) throws IOException { return copy ( srcFS , src , dstFS , dst , deleteSource , true , conf ) ; } | Copy files between FileSystems . |
32,366 | public static boolean copy ( FileSystem srcFS , Path src , FileSystem dstFS , Path dst , boolean deleteSource , boolean overwrite , boolean validate , Configuration conf , IOThrottler throttler ) throws IOException { dst = checkDest ( src . getName ( ) , dstFS , dst , overwrite ) ; FileStatus srcFileStatus = srcFS . getFileStatus ( src ) ; if ( srcFileStatus == null ) { throw new FileNotFoundException ( "File not found: " + src ) ; } if ( srcFileStatus . isDir ( ) ) { checkDependencies ( srcFS , src , dstFS , dst ) ; if ( ! dstFS . mkdirs ( dst ) ) { return false ; } FileStatus contents [ ] = srcFS . listStatus ( src ) ; for ( int i = 0 ; i < contents . length ; i ++ ) { copy ( srcFS , contents [ i ] . getPath ( ) , dstFS , new Path ( dst , contents [ i ] . getPath ( ) . getName ( ) ) , deleteSource , overwrite , validate , conf , throttler ) ; } } else { InputStream in = null ; OutputStream out = null ; try { in = srcFS . open ( src ) ; out = dstFS . create ( dst , overwrite ) ; IOUtils . copyBytes ( in , out , conf . getInt ( "io.file.buffer.size" , 4096 ) , true , throttler ) ; if ( validate ) { InjectionHandler . processEventIO ( InjectionEventCore . FILE_TRUNCATION , dstFS , dst ) ; FileStatus dstFileStatus = dstFS . getFileStatus ( dst ) ; if ( dstFileStatus == null || dstFileStatus . getLen ( ) != srcFileStatus . getLen ( ) ) { throw new IOException ( "Mismatched file length: src=" + src + " dst=" + dst ) ; } } } catch ( IOException e ) { IOUtils . closeStream ( out ) ; IOUtils . closeStream ( in ) ; dstFS . delete ( dst , true ) ; throw e ; } } if ( deleteSource ) { return srcFS . delete ( src , true ) ; } else { return true ; } } | Copy files between file systems |
32,367 | public static boolean copy ( File src , FileSystem dstFS , Path dst , boolean deleteSource , Configuration conf ) throws IOException { dst = checkDest ( src . getName ( ) , dstFS , dst , false ) ; if ( src . isDirectory ( ) ) { if ( ! dstFS . mkdirs ( dst ) ) { return false ; } File contents [ ] = src . listFiles ( ) ; for ( int i = 0 ; i < contents . length ; i ++ ) { copy ( contents [ i ] , dstFS , new Path ( dst , contents [ i ] . getName ( ) ) , deleteSource , conf ) ; } } else if ( src . isFile ( ) ) { InputStream in = null ; OutputStream out = null ; try { in = new FileInputStream ( src ) ; out = dstFS . create ( dst ) ; IOUtils . copyBytes ( in , out , conf ) ; } catch ( IOException e ) { IOUtils . closeStream ( out ) ; IOUtils . closeStream ( in ) ; throw e ; } } else { throw new IOException ( src . toString ( ) + ": No such file or directory" ) ; } if ( deleteSource ) { return FileUtil . fullyDelete ( src ) ; } else { return true ; } } | Copy local files to a FileSystem . |
32,368 | public static boolean copy ( FileSystem srcFS , Path src , File dst , boolean deleteSource , Configuration conf ) throws IOException { return copy ( srcFS , src , dst , deleteSource , conf , false , 0L ) ; } | Copy FileSystem files to local files . |
32,369 | public static long getDU ( File dir ) { long size = 0 ; if ( ! dir . exists ( ) ) return 0 ; if ( ! dir . isDirectory ( ) ) { return dir . length ( ) ; } else { size = dir . length ( ) ; File [ ] allFiles = dir . listFiles ( ) ; for ( int i = 0 ; i < allFiles . length ; i ++ ) { size = size + getDU ( allFiles [ i ] ) ; } return size ; } } | Takes an input dir and returns the du on that local directory . Very basic implementation . |
32,370 | public static void unZip ( File inFile , File unzipDir ) throws IOException { Enumeration < ? extends ZipEntry > entries ; ZipFile zipFile = new ZipFile ( inFile ) ; try { entries = zipFile . entries ( ) ; while ( entries . hasMoreElements ( ) ) { ZipEntry entry = entries . nextElement ( ) ; if ( ! entry . isDirectory ( ) ) { InputStream in = zipFile . getInputStream ( entry ) ; try { File file = new File ( unzipDir , entry . getName ( ) ) ; if ( ! file . getParentFile ( ) . mkdirs ( ) ) { if ( ! file . getParentFile ( ) . isDirectory ( ) ) { throw new IOException ( "Mkdirs failed to create " + file . getParentFile ( ) . toString ( ) ) ; } } OutputStream out = new FileOutputStream ( file ) ; try { byte [ ] buffer = new byte [ 8192 ] ; int i ; while ( ( i = in . read ( buffer ) ) != - 1 ) { out . write ( buffer , 0 , i ) ; } } finally { out . close ( ) ; } } finally { in . close ( ) ; } } } } finally { zipFile . close ( ) ; } } | Given a File input it will unzip the file in a the unzip directory passed as the second parameter |
32,371 | public static void unTar ( File inFile , File untarDir ) throws IOException { if ( ! untarDir . mkdirs ( ) ) { if ( ! untarDir . isDirectory ( ) ) { throw new IOException ( "Mkdirs failed to create " + untarDir ) ; } } StringBuffer untarCommand = new StringBuffer ( ) ; boolean gzipped = inFile . toString ( ) . endsWith ( "gz" ) ; if ( gzipped ) { untarCommand . append ( " gzip -dc '" ) ; untarCommand . append ( FileUtil . makeShellPath ( inFile ) ) ; untarCommand . append ( "' | (" ) ; } untarCommand . append ( "cd '" ) ; untarCommand . append ( FileUtil . makeShellPath ( untarDir ) ) ; untarCommand . append ( "' ; " ) ; untarCommand . append ( "tar -xf " ) ; if ( gzipped ) { untarCommand . append ( " -)" ) ; } else { untarCommand . append ( FileUtil . makeShellPath ( inFile ) ) ; } String [ ] shellCmd = { "bash" , "-c" , untarCommand . toString ( ) } ; ShellCommandExecutor shexec = new ShellCommandExecutor ( shellCmd ) ; shexec . execute ( ) ; int exitcode = shexec . getExitCode ( ) ; if ( exitcode != 0 ) { throw new IOException ( "Error untarring file " + inFile + ". Tar process exited with exit code " + exitcode ) ; } } | Given a Tar File as input it will untar the file in a the untar directory passed as the second parameter |
32,372 | public static int symLink ( String target , String linkname ) throws IOException { String cmd = "ln -s " + target + " " + linkname ; Process p = Runtime . getRuntime ( ) . exec ( cmd , null ) ; int returnVal = - 1 ; try { returnVal = p . waitFor ( ) ; } catch ( InterruptedException e ) { } return returnVal ; } | Create a soft link between a src and destination only on a local disk . HDFS does not support this |
32,373 | public static int chmod ( String filename , String perm ) throws IOException , InterruptedException { return chmod ( filename , perm , false ) ; } | Change the permissions on a filename . |
32,374 | public static final File createLocalTempFile ( final File basefile , final String prefix , final boolean isDeleteOnExit ) throws IOException { File tmp = File . createTempFile ( prefix + basefile . getName ( ) , "" , basefile . getParentFile ( ) ) ; if ( isDeleteOnExit ) { tmp . deleteOnExit ( ) ; } return tmp ; } | Create a tmp file for a base file . |
32,375 | public static List < FileStatus > listStatusHelper ( FileSystem fs , Path path , int depth , List < FileStatus > acc ) throws IOException { FileStatus [ ] fileStatusResults = fs . listStatus ( path ) ; if ( fileStatusResults == null ) { throw new IOException ( "Path does not exist: " + path ) ; } for ( FileStatus f : fileStatusResults ) { Path subPath = f . getPath ( ) ; if ( ! f . isDir ( ) ) { acc . add ( f ) ; } else { if ( depth > 1 ) { listStatusHelper ( fs , subPath , depth - 1 , acc ) ; } else { acc . add ( f ) ; } } } return acc ; } | Core logic for listStatus |
32,376 | public static void listStatusForLeafDir ( FileSystem fs , FileStatus pathStatus , List < FileStatus > acc ) throws IOException { if ( ! pathStatus . isDir ( ) ) return ; FileStatus [ ] fileStatusResults = fs . listStatus ( pathStatus . getPath ( ) ) ; if ( fileStatusResults == null ) { throw new IOException ( "Path does not exist: " + pathStatus . getPath ( ) ) ; } boolean leafDir = true ; for ( FileStatus f : fileStatusResults ) { if ( f . isDir ( ) ) { leafDir = false ; listStatusForLeafDir ( fs , f , acc ) ; } } if ( leafDir ) { acc . add ( pathStatus ) ; } } | pass in a directory path get the list of statuses of leaf directories |
32,377 | public static void replaceFile ( File src , File target ) throws IOException { if ( ! src . renameTo ( target ) ) { int retries = 5 ; while ( target . exists ( ) && ! target . delete ( ) && retries -- >= 0 ) { try { Thread . sleep ( 1000 ) ; } catch ( InterruptedException e ) { throw new IOException ( "replaceFile interrupted." ) ; } } if ( ! src . renameTo ( target ) ) { throw new IOException ( "Unable to rename " + src + " to " + target ) ; } } } | Move the src file to the name specified by target . |
32,378 | public Iterator < Writable > iterator ( ) { final TupleWritable t = this ; return new Iterator < Writable > ( ) { long i = written ; long last = 0L ; public boolean hasNext ( ) { return 0L != i ; } public Writable next ( ) { last = Long . lowestOneBit ( i ) ; if ( 0 == last ) throw new NoSuchElementException ( ) ; i ^= last ; return t . get ( Long . numberOfTrailingZeros ( last ) % 64 ) ; } public void remove ( ) { t . written ^= last ; if ( t . has ( Long . numberOfTrailingZeros ( last ) ) ) { throw new IllegalStateException ( "Attempt to remove non-existent val" ) ; } } } ; } | Return an iterator over the elements in this tuple . Note that this doesn t flatten the tuple ; one may receive tuples from this iterator . |
32,379 | public void close ( ) throws IOException { if ( currentOffset < dataLength ) { byte [ ] t = new byte [ Math . min ( ( int ) ( Integer . MAX_VALUE & ( dataLength - currentOffset ) ) , 32 * 1024 ) ] ; while ( currentOffset < dataLength ) { int n = read ( t , 0 , t . length ) ; if ( 0 == n ) { throw new EOFException ( "Could not validate checksum" ) ; } } } in . close ( ) ; } | Close the input stream . Note that we need to read to the end of the stream to validate the checksum . |
32,380 | public int read ( byte [ ] b , int off , int len ) throws IOException { if ( currentOffset >= dataLength ) { return - 1 ; } return doRead ( b , off , len ) ; } | Read bytes from the stream . At EOF checksum is validated but the checksum bytes are not passed back in the buffer . |
32,381 | public int readWithChecksum ( byte [ ] b , int off , int len ) throws IOException { if ( currentOffset == length ) { return - 1 ; } else if ( currentOffset >= dataLength ) { int lenToCopy = ( int ) ( checksumSize - ( currentOffset - dataLength ) ) ; if ( len < lenToCopy ) { lenToCopy = len ; } System . arraycopy ( csum , ( int ) ( currentOffset - dataLength ) , b , off , lenToCopy ) ; currentOffset += lenToCopy ; return lenToCopy ; } int bytesRead = doRead ( b , off , len ) ; if ( currentOffset == dataLength ) { if ( len >= bytesRead + checksumSize ) { System . arraycopy ( csum , 0 , b , off + bytesRead , checksumSize ) ; bytesRead += checksumSize ; currentOffset += checksumSize ; } } return bytesRead ; } | Read bytes from the stream . At EOF checksum is validated and sent back as the last four bytes of the buffer . The caller should handle these bytes appropriately |
32,382 | private Map < String , List < ResourceGrant > > scheduleTasks ( ) { fullyScheduled = false ; long nodeWait = configManager . getLocalityWait ( type , LocalityLevel . NODE ) ; long rackWait = configManager . getLocalityWait ( type , LocalityLevel . RACK ) ; int tasksToSchedule = configManager . getGrantsPerIteration ( ) ; Map < String , List < ResourceGrant > > sessionIdToGranted = new HashMap < String , List < ResourceGrant > > ( ) ; for ( int i = 0 ; i < tasksToSchedule ; i ++ ) { ScheduledPair scheduled = scheduleOneTask ( nodeWait , rackWait ) ; if ( scheduled == null ) { fullyScheduled = true ; break ; } List < ResourceGrant > granted = sessionIdToGranted . get ( scheduled . sessionId . toString ( ) ) ; if ( granted == null ) { granted = new LinkedList < ResourceGrant > ( ) ; sessionIdToGranted . put ( scheduled . sessionId . toString ( ) , granted ) ; } granted . add ( scheduled . grant ) ; } return sessionIdToGranted ; } | Match requests to nodes . |
32,383 | private ScheduledPair scheduleOneTask ( long nodeWait , long rackWait ) { if ( ! nodeManager . existRunnableNodes ( type ) ) { return null ; } Queue < PoolGroupSchedulable > poolGroupQueue = poolGroupManager . getScheduleQueue ( ) ; while ( ! poolGroupQueue . isEmpty ( ) ) { PoolGroupSchedulable poolGroup = poolGroupQueue . poll ( ) ; if ( poolGroup . reachedMaximum ( ) ) { continue ; } Queue < PoolSchedulable > poolQueue = poolGroup . getScheduleQueue ( ) ; while ( ! poolQueue . isEmpty ( ) ) { PoolSchedulable pool = poolQueue . poll ( ) ; if ( pool . reachedMaximum ( ) ) { continue ; } Queue < SessionSchedulable > sessionQueue = pool . getScheduleQueue ( ) ; while ( ! sessionQueue . isEmpty ( ) ) { SessionSchedulable schedulable = sessionQueue . poll ( ) ; Session session = schedulable . getSession ( ) ; long now = ClusterManager . clock . getTime ( ) ; MatchedPair pair = doMatch ( schedulable , now , nodeWait , rackWait ) ; synchronized ( session ) { if ( session . isDeleted ( ) ) { continue ; } if ( pair != null ) { ResourceGrant grant = commitMatchedResource ( session , pair ) ; if ( grant != null ) { poolGroup . incGranted ( 1 ) ; pool . incGranted ( 1 ) ; schedulable . incGranted ( 1 ) ; poolGroupQueue . add ( poolGroup ) ; poolQueue . add ( pool ) ; sessionQueue . add ( schedulable ) ; return new ScheduledPair ( session . getSessionId ( ) . toString ( ) , grant ) ; } } } } } } return null ; } | Try match one request to one node |
32,384 | private MatchedPair doMatch ( SessionSchedulable schedulable , long now , long nodeWait , long rackWait ) { schedulable . adjustLocalityRequirement ( now , nodeWait , rackWait ) ; for ( LocalityLevel level : neededLocalityLevels ) { if ( level . isBetterThan ( schedulable . getLastLocality ( ) ) ) { continue ; } if ( needLocalityCheck ( level , nodeWait , rackWait ) && ! schedulable . isLocalityGoodEnough ( level ) ) { break ; } Session session = schedulable . getSession ( ) ; synchronized ( session ) { if ( session . isDeleted ( ) ) { return null ; } int pendingRequestCount = session . getPendingRequestCountForType ( type ) ; MatchedPair matchedPair = null ; if ( nodeSnapshot == null || pendingRequestCount < nodeSnapshot . getRunnableHostCount ( ) ) { matchedPair = matchNodeForSession ( session , level ) ; } else { matchedPair = matchSessionForNode ( session , level ) ; } if ( matchedPair != null ) { schedulable . setLocalityLevel ( level ) ; return matchedPair ; } } } schedulable . startLocalityWait ( now ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Could not find a node for " + schedulable . getSession ( ) . getHandle ( ) ) ; } return null ; } | Find a node to give resources to this schedulable session . |
32,385 | private MatchedPair matchNodeForSession ( Session session , LocalityLevel level ) { Iterator < ResourceRequestInfo > pendingRequestIterator = session . getPendingRequestIteratorForType ( type ) ; while ( pendingRequestIterator . hasNext ( ) ) { ResourceRequestInfo req = pendingRequestIterator . next ( ) ; Set < String > excluded = req . getExcludeHosts ( ) ; if ( req . getHosts ( ) == null || req . getHosts ( ) . size ( ) == 0 ) { String host = null ; ClusterNode node = nodeManager . getRunnableNode ( host , LocalityLevel . ANY , type , excluded ) ; if ( node != null ) { return new MatchedPair ( node , req ) ; } continue ; } for ( RequestedNode requestedNode : req . getRequestedNodes ( ) ) { ClusterNode node = nodeManager . getRunnableNode ( requestedNode , level , type , excluded ) ; if ( node != null ) { return new MatchedPair ( node , req ) ; } } } return null ; } | Find a matching pair of node request by looping through the requests in the session looking at the hosts in each request and making look - ups into the node manager . |
32,386 | private MatchedPair matchSessionForNode ( Session session , LocalityLevel level ) { if ( level == LocalityLevel . NODE || level == LocalityLevel . ANY ) { Set < Map . Entry < String , NodeContainer > > hostNodesSet = nodeSnapshot . runnableHosts ( ) ; for ( Map . Entry < String , NodeContainer > hostNodes : hostNodesSet ) { Iterator < ClusterNode > clusterNodeIt = hostNodes . getValue ( ) . iterator ( ) ; while ( clusterNodeIt . hasNext ( ) ) { ClusterNode node = clusterNodeIt . next ( ) ; if ( ! nodeManager . hasEnoughResource ( node ) ) { continue ; } ResourceRequestInfo req = null ; if ( level == LocalityLevel . NODE ) { req = session . getPendingRequestOnHost ( node . getHost ( ) , type ) ; } else { req = session . getPendingRequestForAny ( node . getHost ( ) , type ) ; } if ( req != null ) { return new MatchedPair ( node , req ) ; } } } } else if ( level == LocalityLevel . RACK ) { Set < Map . Entry < Node , NodeContainer > > rackNodesSet = nodeSnapshot . runnableRacks ( ) ; for ( Map . Entry < Node , NodeContainer > rackNodes : rackNodesSet ) { Node rack = rackNodes . getKey ( ) ; NodeContainer nodes = rackNodes . getValue ( ) ; Iterator < ClusterNode > clusterNodeIt = nodes . iterator ( ) ; while ( clusterNodeIt . hasNext ( ) ) { ClusterNode node = clusterNodeIt . next ( ) ; if ( ! nodeManager . hasEnoughResource ( node ) ) { continue ; } ResourceRequestInfo req = session . getPendingRequestOnRack ( node . getHost ( ) , rack , type ) ; if ( req != null ) { return new MatchedPair ( node , req ) ; } } } } return null ; } | Find a matching pair of node request by looping through runnable nodes in the node snapshot created earlier . For each node we make lookups in the session to find a suitable request . |
32,387 | private boolean needLocalityCheck ( LocalityLevel level , long nodeWait , long rackWait ) { if ( level == LocalityLevel . NODE ) { return nodeWait != 0 ; } if ( level == LocalityLevel . RACK ) { return rackWait != 0 ; } return false ; } | If the locality wait time is zero we don t need to check locality at all . |
32,388 | private ResourceGrant commitMatchedResource ( Session session , MatchedPair pair ) { ResourceGrant grant = null ; ResourceRequestInfo req = pair . req ; ClusterNode node = pair . node ; String appInfo = nodeManager . getAppInfo ( node , type ) ; if ( appInfo != null ) { if ( nodeManager . addGrant ( node , session . getSessionId ( ) , req ) ) { grant = new ResourceGrant ( req . getId ( ) , node . getName ( ) , node . getAddress ( ) , ClusterManager . clock . getTime ( ) , req . getType ( ) ) ; grant . setAppInfo ( appInfo ) ; sessionManager . grantResource ( session , req , grant ) ; } } if ( nodeSnapshot != null ) { synchronized ( node ) { if ( node . deleted ) { nodeSnapshot . removeNode ( node ) ; } else if ( ! node . checkForGrant ( Utilities . getUnitResourceRequest ( type ) , nodeManager . getResourceLimit ( ) ) ) { nodeSnapshot . removeNode ( node ) ; } } } return grant ; } | Given a session and match of request - node perform a transaction commit |
32,389 | private void doPreemption ( ) { long now = ClusterManager . clock . getTime ( ) ; if ( now - lastPreemptionTime > PREEMPTION_PERIOD ) { lastPreemptionTime = now ; doPreemptionNow ( ) ; } } | Performs preemption if it has been long enough since the last round . |
32,390 | private void doPreemptionNow ( ) { int totalShare = nodeManager . getAllocatedCpuForType ( type ) ; poolGroupManager . distributeShare ( totalShare ) ; for ( PoolGroupSchedulable poolGroup : poolGroupManager . getPoolGroups ( ) ) { poolGroup . distributeShare ( ) ; } int tasksToPreempt = countTasksShouldPreempt ( ) ; if ( tasksToPreempt > 0 ) { LOG . info ( "Found " + tasksToPreempt + " " + type + " tasks to preempt" ) ; preemptTasks ( tasksToPreempt ) ; } } | Performs the preemption . |
32,391 | private void preemptTasks ( int tasksToPreempt ) { LOG . info ( "Start preempt " + tasksToPreempt + " for type " + type ) ; long maxRunningTime = configManager . getPreemptedTaskMaxRunningTime ( ) ; int rounds = configManager . getPreemptionRounds ( ) ; while ( tasksToPreempt > 0 ) { int preempted = preemptOneSession ( tasksToPreempt , maxRunningTime ) ; if ( preempted == 0 ) { maxRunningTime *= 2 ; if ( -- rounds <= 0 || maxRunningTime <= 0 ) { LOG . warn ( "Cannot preempt enough " + type + " tasks " + " rounds " + configManager . getPreemptionRounds ( ) + " maxRunningTime " + maxRunningTime + " tasks not preempted:" + tasksToPreempt ) ; return ; } } tasksToPreempt -= preempted ; } } | Kill tasks from over - scheduled sessions |
32,392 | private int preemptOneSession ( int maxToPreemt , long maxRunningTime ) { Queue < PoolGroupSchedulable > poolGroupQueue = poolGroupManager . getPreemptQueue ( ) ; while ( ! poolGroupQueue . isEmpty ( ) ) { PoolGroupSchedulable poolGroup = poolGroupQueue . poll ( ) ; poolGroup . distributeShare ( ) ; Queue < PoolSchedulable > poolQueue = poolGroup . getPreemptQueue ( ) ; while ( ! poolQueue . isEmpty ( ) ) { PoolSchedulable pool = poolQueue . poll ( ) ; pool . distributeShare ( ) ; if ( ! pool . isPreemptable ( ) ) { continue ; } Queue < SessionSchedulable > sessionQueue = pool . getPreemptQueue ( ) ; while ( ! sessionQueue . isEmpty ( ) ) { SessionSchedulable schedulable = sessionQueue . poll ( ) ; try { int overScheduled = ( int ) ( schedulable . getGranted ( ) - schedulable . getShare ( ) ) ; if ( overScheduled <= 0 ) { continue ; } maxToPreemt = Math . min ( maxToPreemt , overScheduled ) ; LOG . info ( "Trying to preempt " + maxToPreemt + " " + type + " from " + schedulable . getSession ( ) . getHandle ( ) ) ; int preempted = preemptSession ( schedulable , maxToPreemt , maxRunningTime ) ; poolGroup . incGranted ( - 1 * preempted ) ; pool . incGranted ( - 1 * preempted ) ; schedulable . incGranted ( - 1 * preempted ) ; return preempted ; } catch ( InvalidSessionHandle e ) { LOG . warn ( "Invalid session handle:" + schedulable . getSession ( ) . getHandle ( ) + " Session may be removed" ) ; } finally { poolGroupQueue . add ( poolGroup ) ; poolQueue . add ( pool ) ; } } } } return 0 ; } | Find the most over - scheduled session in the most over - scheduled pool . Kill tasks from this session . |
32,393 | private int preemptSession ( SessionSchedulable schedulable , int maxToPreemt , long maxRunningTime ) throws InvalidSessionHandle { Session session = schedulable . getSession ( ) ; List < Integer > grantIds ; synchronized ( session ) { grantIds = session . getGrantsToPreempt ( maxToPreemt , maxRunningTime , type ) ; } List < ResourceGrant > revokedGrants = sessionManager . revokeResource ( session . getHandle ( ) , grantIds ) ; for ( ResourceGrant grant : revokedGrants ) { nodeManager . cancelGrant ( grant . nodeName , session . getSessionId ( ) , grant . getId ( ) ) ; } sessionNotifier . notifyRevokeResource ( session . getHandle ( ) , revokedGrants , true ) ; int preempted = revokedGrants . size ( ) ; LOG . info ( "Preempt " + preempted + " " + type + " tasks for Session:" + session . getHandle ( ) ) ; return preempted ; } | Preempt a session . |
32,394 | private int countTasksShouldPreempt ( ) { int tasksToPreempt = 0 ; long now = ClusterManager . clock . getTime ( ) ; for ( PoolGroupSchedulable poolGroup : poolGroupManager . getPoolGroups ( ) ) { for ( PoolSchedulable pool : poolGroup . getPools ( ) ) { if ( pool . isStarving ( now ) ) { tasksToPreempt += Math . min ( pool . getPending ( ) , pool . getShare ( ) - pool . getGranted ( ) ) ; } } } return tasksToPreempt ; } | Count how many tasks should preempt for the starving pools |
32,395 | public void addSession ( String id , Session session ) { poolGroupManager . addSession ( id , session ) ; LOG . info ( "Session " + id + " has been added to " + type + " scheduler" ) ; } | Add a session to this scheduler . |
32,396 | public static < K extends WritableComparable , V extends Writable > Writable getEntry ( MapFile . Reader [ ] readers , Partitioner < K , V > partitioner , K key , V value ) throws IOException { int part = partitioner . getPartition ( key , value , readers . length ) ; return readers [ part ] . get ( key , value ) ; } | Get an entry from output generated by this class . |
32,397 | public void monitor ( LocalStore ls ) { int in = 0 ; EventRecord er = null ; Environment . logInfo ( "Started processing log..." ) ; while ( ( er = getNext ( ) ) != null ) { if ( er . isValid ( ) ) { ls . insert ( er ) ; } } PersistentState . updateState ( file . getAbsolutePath ( ) , firstLine , offset ) ; PersistentState . writeState ( "conf/parsing.state" ) ; } | Insert all EventRecords that can be extracted for the represented hardware component into a LocalStore . |
32,398 | public EventRecord [ ] monitor ( ) { ArrayList < EventRecord > recs = new ArrayList < EventRecord > ( ) ; EventRecord er ; while ( ( er = getNext ( ) ) != null ) recs . add ( er ) ; EventRecord [ ] T = new EventRecord [ recs . size ( ) ] ; return recs . toArray ( T ) ; } | Get an array of all EventRecords that can be extracted for the represented hardware component . |
32,399 | public EventRecord getNext ( ) { try { String line = reader . readLine ( ) ; if ( line != null ) { if ( firstLine == null ) firstLine = new String ( line ) ; offset += line . length ( ) + 1 ; return parseLine ( line ) ; } } catch ( IOException e ) { e . printStackTrace ( ) ; } return null ; } | Continue parsing the log file until a valid log entry is identified . When one such entry is found parse it and return a corresponding EventRecord . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.