idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
32,500 | public static boolean saveStaticResolutions ( Configuration conf ) { List < String [ ] > staticResolutions = NetUtils . getAllStaticResolutions ( ) ; if ( staticResolutions != null && staticResolutions . size ( ) > 0 ) { StringBuffer str = new StringBuffer ( ) ; for ( int i = 0 ; i < staticResolutions . size ( ) ; i ++ ) { String [ ] hostToResolved = staticResolutions . get ( i ) ; str . append ( hostToResolved [ 0 ] + "=" + hostToResolved [ 1 ] ) ; if ( i != staticResolutions . size ( ) - 1 ) { str . append ( ',' ) ; } } conf . set ( "hadoop.net.static.resolutions" , str . toString ( ) ) ; return true ; } return false ; } | Save the static resolutions to configuration . This is required for junit to work on testcases that simulate multiple nodes on a single physical node . |
32,501 | public ResourceUsage getResourceUsage ( ) { int totalMapperGrants = 0 ; int totalReducerGrants = 0 ; synchronized ( lockObject ) { for ( Map . Entry < Integer , ResourceGrant > entry : grantedResources . entrySet ( ) ) { switch ( entry . getValue ( ) . getType ( ) ) { case MAP : ++ totalMapperGrants ; break ; case REDUCE : ++ totalReducerGrants ; break ; case JOBTRACKER : break ; default : throw new RuntimeException ( "Illegal type " + entry . getValue ( ) . getType ( ) ) ; } } } return new ResourceUsage ( totalMapperGrants , totalReducerGrants ) ; } | Get a snapshot of the resource usage . |
32,502 | public List < ResourceRequest > getWantedResources ( ) { List < ResourceRequest > wanted = new ArrayList < ResourceRequest > ( ) ; synchronized ( lockObject ) { for ( Integer requestId : setDifference ( requestMap . keySet ( ) , requestedResources . keySet ( ) ) ) { ResourceRequest req = requestMap . get ( requestId ) ; LOG . info ( "Filing request for resource " + requestId ) ; requestedResources . put ( requestId , req ) ; wanted . add ( req ) ; } } return wanted ; } | Find what new requests need to be sent by finding out resources needed by tasks but not sent to Cluster Manager . |
32,503 | public List < ResourceRequest > getResourcesToRelease ( ) { List < ResourceRequest > release = new ArrayList < ResourceRequest > ( ) ; synchronized ( lockObject ) { for ( Integer requestId : setDifference ( requestedResources . keySet ( ) , requestMap . keySet ( ) ) ) { ResourceRequest req = requestedResources . remove ( requestId ) ; if ( req != null ) { release . add ( req ) ; LOG . info ( "Filing release for requestId: " + req . getId ( ) ) ; } } } return release ; } | Go through all the requested resources and find what needs to be released . |
32,504 | public void releaseResource ( int resourceId ) { synchronized ( lockObject ) { ResourceRequest req = requestedResources . get ( resourceId ) ; removeRequestUnprotected ( req ) ; } } | Release the resource that was requested |
32,505 | public void addNewGrants ( List < ResourceGrant > grants ) { int numGranted = 0 ; int numAvailable = 0 ; synchronized ( lockObject ) { for ( ResourceGrant grant : grants ) { Integer requestId = grant . getId ( ) ; if ( ! requestedResources . containsKey ( requestId ) || ! requestMap . containsKey ( requestId ) ) { LOG . info ( "Request for grant " + grant . getId ( ) + " no longer exists" ) ; continue ; } assert ! grantedResources . containsKey ( grant . getId ( ) ) : "Grant " + grant . getId ( ) + " has already been processed." ; updateTrackerAddressUnprotected ( grant ) ; addGrantedResourceUnprotected ( grant ) ; } updateGrantStatsUnprotected ( ) ; numGranted = grantedResources . size ( ) ; numAvailable = availableResources . size ( ) ; lockObject . notify ( ) ; } LOG . info ( "Number of available grants: " + numAvailable + " out of " + numGranted ) ; } | Obtained new grants from Cluster Manager . |
32,506 | public void updateTrackerAddr ( String trackerName , InetAddress addr ) { synchronized ( lockObject ) { trackerAddress . put ( trackerName , addr ) ; } } | Updates mapping between tracker names and addresses |
32,507 | public static InetAddress getRemoteIp ( ) { Call call = CurCall . get ( ) ; if ( call != null ) { return call . connection . socket . getInetAddress ( ) ; } return null ; } | Returns the remote side ip address when invoked inside an RPC . Returns null in case of an error . |
32,508 | public static UserGroupInformation getCurrentUGI ( ) { try { UserGroupInformation origUGI = OrigUGI . get ( ) ; if ( origUGI != null ) { return origUGI ; } Call call = CurCall . get ( ) ; if ( call != null ) { return call . connection . header . getUgi ( ) ; } } catch ( Exception e ) { } return null ; } | Gives access to the subject of the current call . |
32,509 | public static long delayResponse ( ) { Call call = CurCall . get ( ) ; long res = 0 ; if ( call != null ) { call . delayResponse ( ) ; res = delayedRpcId . getAndIncrement ( ) ; delayedCalls . put ( res , call ) ; } return res ; } | If invoked from the RPC handling code will mark this call as delayed response . It returns the id of the delayed call . The response will only be sent once sendDelayedResponse method is called with the id returned from this one . |
32,510 | public static String getRemoteAddress ( ) { InetAddress addr = getRemoteIp ( ) ; return ( addr == null ) ? null : addr . getHostAddress ( ) ; } | Returns remote address as a string when invoked inside an RPC . Returns null in case of an error . |
32,511 | public static void bind ( ServerSocket socket , InetSocketAddress address , int backlog ) throws IOException { try { socket . bind ( address , backlog ) ; } catch ( BindException e ) { BindException bindException = new BindException ( "Problem binding to " + address + " : " + e . getMessage ( ) ) ; bindException . initCause ( e ) ; throw bindException ; } catch ( SocketException e ) { if ( "Unresolved address" . equals ( e . getMessage ( ) ) ) { throw new UnknownHostException ( "Invalid hostname for server: " + address . getHostName ( ) ) ; } else { throw e ; } } } | A convenience method to bind to a given address and report better exceptions if the address is not a valid host . |
32,512 | public synchronized void start ( ) throws IOException { if ( responder . isAlive ( ) ) { return ; } responder . start ( ) ; listener . start ( ) ; handlers = new Handler [ handlerCount ] ; for ( int i = 0 ; i < handlerCount ; i ++ ) { handlers [ i ] = new Handler ( i ) ; handlers [ i ] . start ( ) ; } } | Starts the service . Must be called before any calls will be handled . |
32,513 | public synchronized void waitForHandlers ( ) throws InterruptedException { if ( handlers != null ) { for ( int i = 0 ; i < handlerCount ; i ++ ) { if ( handlers [ i ] != null ) { handlers [ i ] . join ( ) ; } } } } | Waits for all RPC handlers to exit . This ensures that no further RPC calls would be processed by this server . |
32,514 | public Writable call ( Writable param , long receiveTime ) throws IOException { return call ( null , param , receiveTime ) ; } | Called for each call . |
32,515 | synchronized Collection < Journal > getJournals ( ) { Collection < Journal > journals = new ArrayList < Journal > ( ) ; for ( Journal j : journalsById . values ( ) ) { journals . add ( j ) ; } return journals ; } | Get journals managed by this journal node . |
32,516 | public void start ( ) throws IOException { Preconditions . checkState ( ! isStarted ( ) , "JN already running" ) ; journalNodes = getJournalHttpAddresses ( conf ) ; if ( journalNodes . isEmpty ( ) ) { String msg = JournalConfigKeys . DFS_JOURNALNODE_HOSTS + " is not present in the configuration." ; LOG . fatal ( msg ) ; throw new IOException ( msg ) ; } LOG . info ( "JournalNode hosts: " + journalNodes ) ; validateAndCreateJournalDir ( localDir ) ; LOG . info ( "JournalNode storage: " + localDir . getAbsolutePath ( ) ) ; InetSocketAddress socAddr = JournalNodeRpcServer . getAddress ( conf ) ; metrics = new JournalNodeMetrics ( conf , socAddr . toString ( ) ) ; httpServer = new JournalNodeHttpServer ( conf , this ) ; httpServer . start ( ) ; rpcServer = new JournalNodeRpcServer ( conf , this ) ; rpcServer . start ( ) ; journalSyncer = new JournalNodeJournalSyncer ( journalNodes , httpServer . getAddress ( ) , conf ) ; journalSyncerThread = new Thread ( journalSyncer , "Thread-JournalSyncer" ) ; journalSyncerThread . start ( ) ; } | Start listening for edits via RPC . |
32,517 | public void stop ( int rc ) { this . resultCode = rc ; LOG . info ( "Stopping Journal Node: " + this ) ; if ( rpcServer != null ) { rpcServer . stop ( ) ; rpcServer = null ; } if ( httpServer != null ) { try { httpServer . stop ( ) ; } catch ( IOException ioe ) { LOG . warn ( "Unable to stop HTTP server for " + this , ioe ) ; } } for ( Journal j : journalsById . values ( ) ) { IOUtils . cleanup ( LOG , j ) ; } if ( metrics != null ) { metrics . shutdown ( ) ; } if ( journalSyncer != null ) { journalSyncer . stop ( ) ; } } | Stop the daemon with the given status code |
32,518 | private File getJournalDir ( String jid ) { String dir = conf . get ( JournalConfigKeys . DFS_JOURNALNODE_DIR_KEY , JournalConfigKeys . DFS_JOURNALNODE_DIR_DEFAULT ) ; Preconditions . checkArgument ( jid != null && ! jid . isEmpty ( ) , "bad journal identifier: %s" , jid ) ; return new File ( new File ( new File ( dir ) , "edits" ) , jid ) ; } | Return the directory inside our configured storage dir which corresponds to a given journal . |
32,519 | static List < InetSocketAddress > getJournalHttpAddresses ( Configuration conf ) { String [ ] hosts = JournalConfigHelper . getJournalHttpHosts ( conf ) ; List < InetSocketAddress > addrs = new ArrayList < InetSocketAddress > ( ) ; for ( String host : hosts ) { addrs . add ( NetUtils . createSocketAddr ( host ) ) ; } return addrs ; } | Get the list of journal addresses to connect . |
32,520 | public int compareTo ( Object o ) { int thisValue = this . value ; int thatValue = ( ( ByteWritable ) o ) . value ; return ( thisValue < thatValue ? - 1 : ( thisValue == thatValue ? 0 : 1 ) ) ; } | Compares two ByteWritables . |
32,521 | public static void setPolicy ( Policy policy ) { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Setting Hadoop security policy" ) ; } Policy . setPolicy ( policy ) ; } | Set the global security policy for Hadoop . |
32,522 | public void reduce ( K2 key , Iterator < V2 > values , OutputCollector < K3 , V3 > output , Reporter reporter ) throws IOException { isOk = false ; startApplication ( output , reporter ) ; downlink . reduceKey ( key ) ; while ( values . hasNext ( ) ) { downlink . reduceValue ( values . next ( ) ) ; } if ( skipping ) { downlink . flush ( ) ; } isOk = true ; } | Process all of the keys and values . Start up the application if we haven't started it yet . |
32,523 | public void close ( ) throws IOException { if ( isOk ) { OutputCollector < K3 , V3 > nullCollector = new OutputCollector < K3 , V3 > ( ) { public void collect ( K3 key , V3 value ) throws IOException { } } ; startApplication ( nullCollector , Reporter . NULL ) ; } try { if ( isOk ) { application . getDownlink ( ) . endOfInput ( ) ; } else { application . getDownlink ( ) . abort ( ) ; } LOG . info ( "waiting for finish" ) ; application . waitForFinish ( ) ; LOG . info ( "got done" ) ; } catch ( Throwable t ) { application . abort ( t ) ; } finally { application . cleanup ( ) ; } } | Handle the end of the input by closing down the application . |
32,524 | private void updateJobWithSplit ( final JobConf job , InputSplit inputSplit ) { if ( inputSplit instanceof FileSplit ) { FileSplit fileSplit = ( FileSplit ) inputSplit ; job . set ( "map.input.file" , fileSplit . getPath ( ) . toString ( ) ) ; job . setLong ( "map.input.start" , fileSplit . getStart ( ) ) ; job . setLong ( "map.input.length" , fileSplit . getLength ( ) ) ; } LOG . info ( "split: " + inputSplit . toString ( ) ) ; } | Update the job with details about the file split |
32,525 | synchronized void jobAdded ( JobInProgress job ) throws IOException { QueueSchedulingInfo qsi = queueInfoMap . get ( job . getProfile ( ) . getQueueName ( ) ) ; Integer i = qsi . numJobsByUser . get ( job . getProfile ( ) . getUser ( ) ) ; if ( null == i ) { i = 1 ; qsi . mapTSI . numSlotsOccupiedByUser . put ( job . getProfile ( ) . getUser ( ) , Integer . valueOf ( 0 ) ) ; qsi . reduceTSI . numSlotsOccupiedByUser . put ( job . getProfile ( ) . getUser ( ) , Integer . valueOf ( 0 ) ) ; } else { i ++ ; } qsi . numJobsByUser . put ( job . getProfile ( ) . getUser ( ) , i ) ; preInitializeJob ( job ) ; LOG . debug ( "Job " + job . getJobID ( ) . toString ( ) + " is added under user " + job . getProfile ( ) . getUser ( ) + ", user now has " + i + " jobs" ) ; } | called when a job is added |
32,526 | synchronized void jobCompleted ( JobInProgress job ) { QueueSchedulingInfo qsi = queueInfoMap . get ( job . getProfile ( ) . getQueueName ( ) ) ; LOG . debug ( "JOb to be removed for user " + job . getProfile ( ) . getUser ( ) ) ; Integer i = qsi . numJobsByUser . get ( job . getProfile ( ) . getUser ( ) ) ; i -- ; if ( 0 == i . intValue ( ) ) { qsi . numJobsByUser . remove ( job . getProfile ( ) . getUser ( ) ) ; qsi . mapTSI . numSlotsOccupiedByUser . remove ( job . getProfile ( ) . getUser ( ) ) ; qsi . reduceTSI . numSlotsOccupiedByUser . remove ( job . getProfile ( ) . getUser ( ) ) ; LOG . debug ( "No more jobs for user, number of users = " + qsi . numJobsByUser . size ( ) ) ; } else { qsi . numJobsByUser . put ( job . getProfile ( ) . getUser ( ) , i ) ; LOG . debug ( "User still has " + i + " jobs, number of users = " + qsi . numJobsByUser . size ( ) ) ; } } | called when a job completes |
32,527 | public static void write ( DataOutput out , OpenFileInfo elem ) throws IOException { OpenFileInfo info = new OpenFileInfo ( elem . filePath , elem . millisOpen ) ; info . write ( out ) ; } | milliseconds that the file has been held open |
32,528 | public void reloadConfigIfNecessary ( ) { if ( whitelistFile == null ) { return ; } long time = System . currentTimeMillis ( ) ; if ( time > lastReloadAttempt + CONFIG_RELOAD_INTERVAL ) { lastReloadAttempt = time ; try { File file = new File ( whitelistFile ) ; long lastModified = file . lastModified ( ) ; if ( lastModified > lastSuccessfulReload && time > lastModified + configReloadWait ) { reloadWhitelist ( ) ; lastSuccessfulReload = time ; lastReloadAttemptFailed = false ; } } catch ( Exception e ) { if ( ! lastReloadAttemptFailed ) { LOG . error ( "Failed to reload whitelist file - " + "will use existing allocations." , e ) ; } lastReloadAttemptFailed = true ; } } } | Checks to see if the namenode config file is updated on disk . If so then read all its contents . At present only the whitelist config is updated but we will enhance this to update all possible namenode configs in future . |
32,529 | void reloadWhitelist ( ) throws IOException { LinkedList < String > paths = new LinkedList < String > ( ) ; FileInputStream fstream = new FileInputStream ( whitelistFile ) ; DataInputStream in = new DataInputStream ( fstream ) ; BufferedReader br = new BufferedReader ( new InputStreamReader ( in ) ) ; int count = 0 ; while ( true ) { String str = br . readLine ( ) ; if ( str == null ) { break ; } str = str . trim ( ) ; if ( str . startsWith ( "#" ) ) { continue ; } paths . add ( str ) ; LOG . info ( "Whitelisted directory [" + count + "] " + str ) ; count ++ ; } in . close ( ) ; namesys . writeLock ( ) ; try { namesys . neverDeletePaths . clear ( ) ; for ( String s : paths ) { namesys . neverDeletePaths . add ( s ) ; } } finally { namesys . writeUnlock ( ) ; } } | Removes all the entries currently in neverDeletePaths and add the new ones specified |
32,530 | public static void constructFakeRaidFile ( DistributedFileSystem dfs , String filePath , RaidCodec codec ) throws IOException { long blockSize = 512L ; byte [ ] buffer = new byte [ ( int ) ( codec . numDataBlocks * blockSize ) ] ; int [ ] checksum = new int [ codec . numDataBlocks ] ; OutputStream out = dfs . create ( new Path ( filePath ) , true , 1 , codec . parityReplication , blockSize ) ; random . nextBytes ( buffer ) ; out . write ( buffer ) ; out . close ( ) ; Path parityTmp = new Path ( filePath + "_parity" ) ; buffer = new byte [ ( int ) ( codec . numParityBlocks * blockSize ) ] ; out = dfs . create ( parityTmp , true , 1 , codec . parityReplication , blockSize ) ; random . nextBytes ( buffer ) ; out . write ( buffer ) ; out . close ( ) ; FileStatus stat = dfs . getFileStatus ( new Path ( filePath ) ) ; dfs . setTimes ( parityTmp , stat . getModificationTime ( ) , stat . getAccessTime ( ) ) ; dfs . merge ( parityTmp , new Path ( filePath ) , codec . id , checksum ) ; } | This function create two mock source and parity files and merge them . |
32,531 | public static MetricsContext getContext ( String refName , String contextName ) { MetricsContext metricsContext ; try { metricsContext = ContextFactory . getFactory ( ) . getContext ( refName , contextName ) ; if ( ! metricsContext . isMonitoring ( ) ) { metricsContext . startMonitoring ( ) ; } } catch ( Exception ex ) { LOG . error ( "Unable to create metrics context " + contextName , ex ) ; metricsContext = ContextFactory . getNullContext ( contextName ) ; } return metricsContext ; } | Utility method to return the named context . If the desired context cannot be created for any reason the exception is logged and a null context is returned . |
32,532 | public static MetricsRecord createRecord ( MetricsContext context , String recordName ) { MetricsRecord metricsRecord = context . createRecord ( recordName ) ; metricsRecord . setTag ( "hostName" , getHostName ( ) ) ; return metricsRecord ; } | Utility method to create and return new metrics record instance within the given context . This record is tagged with the host name . |
32,533 | private static String getHostName ( ) { String hostName = null ; try { hostName = InetAddress . getLocalHost ( ) . getHostName ( ) ; } catch ( UnknownHostException ex ) { LOG . info ( "Unable to obtain hostName" , ex ) ; hostName = "unknown" ; } return hostName ; } | Returns the host name . If the host name is unobtainable logs the exception and returns unknown . |
32,534 | private String getRunningDatanode ( Configuration conf ) throws IOException { FileSystem fs = FileSystem . newInstance ( conf ) ; fs . mkdirs ( new Path ( "/tmp" ) ) ; Path fileName = new Path ( "/tmp" , rtc . task_name + System . currentTimeMillis ( ) + rb . nextInt ( ) ) ; if ( fs . exists ( fileName ) ) { fs . delete ( fileName ) ; } FSDataOutputStream out = null ; byte [ ] buffer = new byte [ 1 ] ; buffer [ 0 ] = '0' ; try { out = fs . create ( fileName , ( short ) 1 ) ; out . write ( buffer , 0 , 1 ) ; } finally { IOUtils . closeStream ( out ) ; } fs = getDFS ( fs ) ; assert fs instanceof DistributedFileSystem ; DistributedFileSystem dfs = ( DistributedFileSystem ) fs ; BlockLocation [ ] lbs = dfs . getClient ( ) . getBlockLocations ( fileName . toUri ( ) . getPath ( ) , 0 , 1 ) ; fs . delete ( fileName ) ; return lbs [ 0 ] . getHosts ( ) [ 0 ] ; } | Write a small file to figure out which datanode we are running |
32,535 | public GenThread [ ] prepare ( JobConf conf , Text key , Text value ) throws IOException { this . rtc = new RunTimeConstants ( ) ; super . prepare ( conf , key , value , rtc ) ; Path basePath = new Path ( key . toString ( ) ) ; LOG . info ( "base path is " + basePath ) ; Path checksumPath = null ; FileSystem fs = FileSystem . newInstance ( conf ) ; if ( value . toString ( ) . length ( ) != 0 ) { checksumPath = new Path ( value . toString ( ) ) ; } HashMap < String , Long > checksumMap = null ; boolean verifyChecksum = false ; if ( fs . exists ( checksumPath ) ) { LOG . info ( "checksum path is " + checksumPath ) ; verifyChecksum = true ; checksumMap = new HashMap < String , Long > ( ) ; SequenceFile . Reader reader = null ; try { reader = new SequenceFile . Reader ( fs , checksumPath , conf ) ; Writable dir = ( Writable ) ReflectionUtils . newInstance ( reader . getKeyClass ( ) , conf ) ; Writable checksum = ( Writable ) ReflectionUtils . newInstance ( reader . getValueClass ( ) , conf ) ; while ( reader . next ( dir , checksum ) ) { LOG . info ( "dir: " + dir . toString ( ) + " checksum: " + checksum ) ; checksumMap . put ( fs . makeQualified ( new Path ( dir . toString ( ) ) ) . toUri ( ) . getPath ( ) , Long . parseLong ( checksum . toString ( ) ) ) ; } } catch ( Exception e ) { LOG . error ( e ) ; throw new IOException ( e ) ; } finally { IOUtils . closeStream ( reader ) ; } } FileStatus [ ] baseDirs = fs . listStatus ( basePath ) ; if ( rtc . nthreads != baseDirs . length ) { throw new IOException ( "Number of directory under " + basePath + "(" + baseDirs . length + ") doesn't match number of threads " + "(" + rtc . nthreads + ")." ) ; } GenReaderThread [ ] threads = new GenReaderThread [ ( int ) rtc . nthreads ] ; for ( int i = 0 ; i < rtc . nthreads ; i ++ ) { long checksum = 0 ; if ( verifyChecksum ) { String basePathStr = baseDirs [ i ] . getPath ( ) . toUri ( ) . getPath ( ) ; checksum = checksumMap . 
get ( basePathStr ) ; } threads [ i ] = new GenReaderThread ( conf , baseDirs [ i ] . getPath ( ) , checksum , verifyChecksum , rtc ) ; } return threads ; } | Create a number of threads to generate read traffics |
32,536 | public static FileStatus toFileStatus ( HdfsFileStatus stat , String src ) { if ( stat == null ) { return null ; } return new FileStatus ( stat . getLen ( ) , stat . isDir ( ) , stat . getReplication ( ) , stat . getBlockSize ( ) , stat . getModificationTime ( ) , stat . getAccessTime ( ) , stat . getPermission ( ) , stat . getOwner ( ) , stat . getGroup ( ) , stat . getFullPath ( new Path ( src ) ) ) ; } | Convert an HdfsFileStatus to a FileStatus |
32,537 | final public String getFullName ( final String parent ) { if ( isEmptyLocalName ( ) ) { return parent ; } StringBuilder fullName = new StringBuilder ( parent ) ; if ( ! parent . endsWith ( Path . SEPARATOR ) ) { fullName . append ( Path . SEPARATOR ) ; } fullName . append ( getLocalName ( ) ) ; return fullName . toString ( ) ; } | Get the string representation of the full path name |
32,538 | private static String getValue ( Map < String , String > map , String keyName ) { String value = map . get ( keyName ) ; return value == null ? "-" : value ; } | Retrieve value from the map corresponding to the given key . |
32,539 | private String fetchStats ( InetSocketAddress jn ) throws IOException { try { return DFSUtil . getHTMLContentWithTimeout ( new URI ( "http" , null , jn . getAddress ( ) . getHostAddress ( ) , jn . getPort ( ) , "/journalStats" , null , null ) . toURL ( ) , HTTP_CONNECT_TIMEOUT , HTTP_READ_TIMEOUT ) ; } catch ( Exception e ) { LOG . error ( "Problem connecting to " + getHostAddress ( jn ) , e ) ; return null ; } } | Fetch stats from a single given journal node over http . |
32,540 | private static Map < String , Map < String , String > > getStatsMap ( String json ) throws IOException { if ( json == null || json . isEmpty ( ) ) { return new HashMap < String , Map < String , String > > ( ) ; } TypeReference < Map < String , Map < String , String > > > type = new TypeReference < Map < String , Map < String , String > > > ( ) { } ; return mapper . readValue ( json , type ) ; } | Get the map corresponding to the JSON string |
32,541 | public static String getNodeReport ( QJMStatus status ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "<table border=1 cellpadding=1 cellspacing=0 title=\"Journals\">" ) ; sb . append ( "<thead><tr><td><b>Journal node</b></td><td><b>Alive</b></td></tr></thead>" ) ; for ( Entry < String , Boolean > e : status . getAliveMap ( ) . entrySet ( ) ) { if ( e . getValue ( ) ) { sb . append ( "<tr><td>" + e . getKey ( ) + "</td><td><font color=green>Active</font></td></tr>" ) ; } else { sb . append ( "<tr><td>" + e . getKey ( ) + "</td><td><font color=red>Failed</font></td></tr>" ) ; } } sb . append ( "</table>" ) ; return sb . toString ( ) ; } | Generate health report for journal nodes |
32,542 | public static String getJournalReport ( QJMStatus status ) { StringBuilder sb = new StringBuilder ( ) ; sb . append ( "<table border=1 cellpadding=1 cellspacing=0 title=\"Journals\">" ) ; sb . append ( "<thead><tr><td><b>JournalId</b></td><td><b>Statistics</b></td></tr></thead>" ) ; for ( String journalId : status . getJournalIds ( ) ) { sb . append ( "<tr><td>" + journalId + "</td><td>" ) ; getHTMLTableForASingleJournal ( status , journalId , sb ) ; sb . append ( "</td></tr>" ) ; } sb . append ( "</table>" ) ; return sb . toString ( ) ; } | Generate report for all journals and all journal nodes |
32,543 | public static void getHTMLTableForASingleJournal ( QJMStatus status , String journalName , StringBuilder sb ) { List < StatsDescriptor > stats = status . stats . get ( journalName ) ; if ( stats == null ) { return ; } Set < String > statsNames = status . statNames ; sb . append ( "<table border=1 align=\"right\" cellpadding=1 " + "cellspacing=0 title=\"Journal statistics\">" ) ; sb . append ( "<thead><tr><td></td>" ) ; for ( StatsDescriptor sd : stats ) { sb . append ( "<td><b>" + sd . journalNode + "</b></td>" ) ; } sb . append ( "</tr></thead>" ) ; for ( String st : statsNames ) { sb . append ( "<tr><td>" + st + "</td>" ) ; for ( StatsDescriptor sd : stats ) { sb . append ( "<td align=\"right\">" + getValue ( sd . statsPerJournal , st ) + "</td>" ) ; } sb . append ( "</tr>" ) ; } sb . append ( "</table>" ) ; } | Render html table for a single journal . |
32,544 | private static String getHostAddress ( InetSocketAddress addr ) { String hostToAppend = "" ; if ( addr . isUnresolved ( ) ) { hostToAppend = addr . getHostName ( ) ; } else { hostToAppend = addr . getAddress ( ) . getHostAddress ( ) ; } return hostToAppend ; } | Returns the address of the host minimizing DNS lookups . |
32,545 | public synchronized Pool getPool ( String name ) { Pool pool = pools . get ( name ) ; if ( pool == null ) { boolean isConfiguredPool = poolNamesInAllocFile . contains ( name ) ; pool = new Pool ( name , isConfiguredPool ) ; pools . put ( name , pool ) ; } return pool ; } | Get a pool by name creating it if necessary . |
32,546 | public boolean reloadAllocsIfNecessary ( ) { if ( allocFile == null ) { return false ; } long time = System . currentTimeMillis ( ) ; boolean reloaded = false ; if ( time > lastReloadAttempt + ALLOC_RELOAD_INTERVAL ) { lastReloadAttempt = time ; try { File file = new File ( allocFile ) ; long lastModified = file . lastModified ( ) ; if ( lastModified > lastSuccessfulReload && time > lastModified + ALLOC_RELOAD_WAIT ) { reloadAllocs ( ) ; reloaded = true ; lastSuccessfulReload = time ; lastReloadAttemptFailed = false ; } } catch ( Exception e ) { if ( ! lastReloadAttemptFailed ) { LOG . error ( "Failed to reload allocations file - " + "will use existing allocations." , e ) ; } lastReloadAttemptFailed = true ; } } return reloaded ; } | Reload allocations file if it hasn t been loaded in a while return true if reloaded |
32,547 | public int getMinSlots ( String pool , TaskType taskType ) { Map < String , Integer > allocationMap = ( taskType == TaskType . MAP ? mapAllocs : reduceAllocs ) ; Integer alloc = allocationMap . get ( pool ) ; return ( alloc == null ? 0 : alloc ) ; } | Get the allocation for a particular pool |
32,548 | public synchronized void addJob ( JobInProgress job ) { String poolName = getPoolName ( job ) ; LOG . info ( "Adding job " + job . getJobID ( ) + " to pool " + poolName + ", originally from pool " + job . getJobConf ( ) . get ( EXPLICIT_POOL_PROPERTY ) ) ; getPool ( poolName ) . addJob ( job ) ; } | Add a job in the appropriate pool |
32,549 | public synchronized void removeJob ( JobInProgress job ) { if ( getPool ( getPoolName ( job ) ) . removeJob ( job ) ) { return ; } for ( Pool pool : getPools ( ) ) { if ( pool . removeJob ( job ) ) { LOG . info ( "Removed job " + job . jobId + " from pool " + pool . getName ( ) + " instead of pool " + getPoolName ( job ) ) ; return ; } } LOG . error ( "removeJob: Couldn't find job " + job . jobId + " in any pool, should have been in pool " + getPoolName ( job ) ) ; } | Remove a job |
32,550 | public synchronized void setPool ( JobInProgress job , String pool ) { removeJob ( job ) ; job . getJobConf ( ) . set ( EXPLICIT_POOL_PROPERTY , pool ) ; addJob ( job ) ; } | Change the pool of a particular job |
32,551 | public synchronized String getPoolName ( JobInProgress job ) { String name = getExplicitPoolName ( job ) . trim ( ) ; String redirect = poolRedirectMap . get ( name ) ; if ( redirect == null ) { return name ; } else { return redirect ; } } | Get the pool name for a JobInProgress from its configuration . This uses the project property in the jobconf by default or the property set with mapred . fairscheduler . poolnameproperty . |
32,552 | public synchronized void checkValidPoolProperty ( JobInProgress job ) throws InvalidJobConfException { if ( ! strictPoolsMode ) { return ; } JobConf conf = job . getJobConf ( ) ; String poolName = conf . get ( EXPLICIT_POOL_PROPERTY ) ; if ( poolName == null ) { return ; } else { poolName = poolName . toLowerCase ( ) ; } if ( poolNamesInAllocFile . contains ( poolName ) ) { return ; } throw new InvalidJobConfException ( "checkValidPoolProperty: Pool name " + conf . get ( EXPLICIT_POOL_PROPERTY ) + " set with Hadoop property " + EXPLICIT_POOL_PROPERTY + " does not exist. " + "Please check for typos in the pool name." ) ; } | A quick check for to ensure that if the pool name is set and we are in a strict pools mode then the pool must exist to run this job . |
32,553 | public synchronized Collection < String > getPoolNames ( ) { List < String > list = new ArrayList < String > ( ) ; for ( Pool pool : getPools ( ) ) { list . add ( pool . getName ( ) ) ; } Collections . sort ( list ) ; return list ; } | Get all pool names that have been seen either in the allocation file or in a MapReduce job . |
32,554 | public boolean canBePreempted ( String pool ) { Boolean result = canBePreempted . get ( pool ) ; return result == null ? true : result ; } | Can we take slots from this pool when preempting tasks? |
32,555 | public boolean fifoWeight ( String pool ) { Boolean result = poolFifoWeight . get ( pool ) ; return result == null ? false : result ; } | Do we boost the weight for the older jobs? |
32,556 | public long getMinSharePreemptionTimeout ( String pool ) { if ( minSharePreemptionTimeouts . containsKey ( pool ) ) { return minSharePreemptionTimeouts . get ( pool ) ; } else { return defaultMinSharePreemptionTimeout ; } } | Get a pool's min share preemption timeout in milliseconds . This is the time after which jobs in the pool may kill other pools' tasks if they are below their min share . |
32,557 | int getMaxSlots ( String poolName , TaskType taskType ) { Map < String , Integer > maxMap = ( taskType == TaskType . MAP ? poolMaxMaps : poolMaxReduces ) ; if ( maxMap . containsKey ( poolName ) ) { return maxMap . get ( poolName ) ; } else { return Integer . MAX_VALUE ; } } | Get the maximum map or reduce slots for the given pool . |
32,558 | public void resetRunningTasks ( TaskType type ) { Map < String , Integer > runningMap = ( type == TaskType . MAP ? poolRunningMaps : poolRunningReduces ) ; for ( String poolName : runningMap . keySet ( ) ) { runningMap . put ( poolName , 0 ) ; } } | Set the number of running tasks in all pools to zero |
32,559 | public void incRunningTasks ( String poolName , TaskType type , int inc ) { Map < String , Integer > runningMap = ( type == TaskType . MAP ? poolRunningMaps : poolRunningReduces ) ; if ( ! runningMap . containsKey ( poolName ) ) { runningMap . put ( poolName , 0 ) ; } int runningTasks = runningMap . get ( poolName ) + inc ; runningMap . put ( poolName , runningTasks ) ; } | Set the number of running tasks in a pool |
32,560 | public int getRunningTasks ( String poolName , TaskType type ) { Map < String , Integer > runningMap = ( type == TaskType . MAP ? poolRunningMaps : poolRunningReduces ) ; return ( runningMap . containsKey ( poolName ) ? runningMap . get ( poolName ) : 0 ) ; } | Get the number of running tasks in a pool |
32,561 | public boolean isMaxTasks ( String poolName , TaskType type ) { return getRunningTasks ( poolName , type ) >= getMaxSlots ( poolName , type ) ; } | Is the pool task limit exceeded? |
32,562 | public boolean checkMinimumSlotsAvailable ( ClusterStatus clusterStatus , TaskType type ) { Map < String , Integer > poolToMinSlots = ( type == TaskType . MAP ) ? mapAllocs : reduceAllocs ; int totalSlots = ( type == TaskType . MAP ) ? clusterStatus . getMaxMapTasks ( ) : clusterStatus . getMaxReduceTasks ( ) ; int totalMinSlots = 0 ; for ( int minSlots : poolToMinSlots . values ( ) ) { totalMinSlots += minSlots ; } if ( totalMinSlots > totalSlots ) { LOG . warn ( String . format ( "Bad minimum %s slot configuration. cluster:%s totalMinSlots:%s" , type , totalSlots , totalMinSlots ) ) ; return false ; } LOG . info ( String . format ( "Minimum %s slots checked. cluster:%s totalMinSlots:%s" , type , totalSlots , totalSlots ) ) ; return true ; } | Check if the minimum slots set in the configuration is feasible |
32,563 | public void start ( ) { try { if ( init ) { setErrorStatus ( "Cannot initialize multiple times" , null ) ; return ; } init = true ; HttpPost postRequest = setupRequest ( new ByteArrayOutputStream ( 0 ) ) ; UploadImageParam . setHeaders ( postRequest , journalId , namespaceInfoString , epoch , txid , 0 , segmentId ++ , false ) ; HttpClient httpClient = new DefaultHttpClient ( ) ; HttpResponse response = httpClient . execute ( postRequest ) ; if ( response . getStatusLine ( ) . getStatusCode ( ) == HttpServletResponse . SC_NOT_ACCEPTABLE ) { throwIOException ( "Error when starting upload to : " + uri + " status: " + response . getStatusLine ( ) . toString ( ) ) ; } for ( Header h : response . getAllHeaders ( ) ) { if ( h . getName ( ) . equals ( "sessionId" ) ) { sessionId = Long . parseLong ( h . getValue ( ) ) ; break ; } } if ( sessionId < 0 ) { throw new IOException ( "Session id is missing" ) ; } } catch ( Exception e ) { setErrorStatus ( "Exception when starting upload channel for: " + uri , e ) ; } } | Create an image upload channel based on image txid and other metadata . |
32,564 | public void send ( ByteArrayOutputStream bos ) { try { if ( this . isDisabled ) { return ; } if ( available . tryAcquire ( WAIT_NEXT_BUFFER_TIME_OUT_SECONDS , TimeUnit . SECONDS ) ) { tasks . add ( sendExecutor . submit ( new SendWorker ( bos , segmentId ++ , false ) ) ) ; } else { setErrorStatus ( "Number of chunks in the queue to be send exceeded the configured number " + maxBufferedChunks , null ) ; } } catch ( Exception e ) { setErrorStatus ( "Exception when submitting a task" , e ) ; } } | Send a chunk of data . |
32,565 | public void close ( ) { if ( this . isDisabled || this . closed ) { return ; } closed = true ; try { tasks . add ( sendExecutor . submit ( new SendWorker ( new ByteArrayOutputStream ( 0 ) , segmentId ++ , true ) ) ) ; for ( Future < Void > task : tasks ) { task . get ( ) ; } } catch ( InterruptedException e ) { setErrorStatus ( "Interrupted exception" , e ) ; } catch ( ExecutionException e ) { setErrorStatus ( "Execution exception" , e ) ; } finally { sendExecutor . shutdownNow ( ) ; } } | Close the upload |
32,566 | private HttpPost setupRequest ( ByteArrayOutputStream bos ) { ContentBody cb = new ByteArrayBody ( bos . toByteArray ( ) , "image" ) ; HttpPost postRequest = new HttpPost ( uri + "/uploadImage" ) ; MultipartEntity reqEntity = new MultipartEntity ( HttpMultipartMode . BROWSER_COMPATIBLE ) ; reqEntity . addPart ( "file" , cb ) ; postRequest . setEntity ( reqEntity ) ; return postRequest ; } | Create a post request encapsulating bytes from the given ByteArrayOutputStream . |
32,567 | void setErrorStatus ( String msg , Exception e ) { this . e = new IOException ( msg + " " + ( e == null ? "" : e . toString ( ) ) ) ; this . isDisabled = true ; sendExecutor . shutdown ( ) ; LOG . error ( msg , e ) ; } | If any operation for this channel fail the error status is set . Parent output stream based on this information decides whether the upload can be still continued with remaining channels . |
32,568 | private void showJobsNotAdmitted ( PrintWriter out , Set < String > userFilterSet , Set < String > poolFilterSet ) { out . print ( "<h2>Not Admitted Jobs</h2>\n" ) ; out . print ( "<b>Filter</b> " + "<input type=\"text\" onkeyup=\"filterTables(this.value)\" " + "id=\"NotAdmittedJobsTableFilter\">" + "<input type=\"checkbox\" id=\"SubmittedTimeFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>Submitted Time " + "<input type=\"checkbox\" id=\"JobIDFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>JobID " + "<input type=\"checkbox\" id=\"UserFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>User " + "<input type=\"checkbox\" id=\"PoolFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>Pool " + "<input type=\"checkbox\" id=\"PrioFilterToggle\" " + "onChange=\"filterTables(inputRJF.value)\" checked>Priority" + "<br><br>\n" ) ; out . print ( "<script type=\"text/javascript\">var inputRJF = " + "document.getElementById('NotAdmittedJobsTableFilter');</script>" ) ; out . print ( "<table border=\"2\" cellpadding=\"5\" cellspacing=\"2\" " + "id=\"NotAdmittedJobsTable\" class=\"tablesorter\">\n" ) ; out . printf ( "<thead><tr>" + "<th>Submitted Time</th>" + "<th>JobID</th>" + "<th>User</th>" + "<th>Pool</th>" + "<th>Priority</th>" + "<th>Reason</th>" + "<th>Job Position</th>" + "<th>ETA to Admission (secs)</th>" ) ; out . print ( "</tr></thead><tbody>\n" ) ; Collection < NotAdmittedJobInfo > notAdmittedJobInfos = scheduler . getNotAdmittedJobs ( ) ; for ( NotAdmittedJobInfo jobInfo : notAdmittedJobInfos ) { if ( ( userFilterSet != null ) && ! userFilterSet . contains ( jobInfo . getUser ( ) ) ) { continue ; } if ( ( poolFilterSet != null ) && ! poolFilterSet . contains ( jobInfo . getPool ( ) ) ) { continue ; } out . printf ( "<tr id=\"%s\">\n" , jobInfo . getJobName ( ) ) ; out . printf ( "<td>%s</td>\n" , DATE_FORMAT . format ( jobInfo . getStartDate ( ) ) ) ; out . printf ( "<td><a href=\"jobdetails.jsp?jobid=%s\">%s</a></td>" , jobInfo . getJobName ( ) , jobInfo . getJobName ( ) ) ; out . printf ( "<td>%s</td>\n" , jobInfo . getUser ( ) ) ; out . printf ( "<td>%s</td>\n" , jobInfo . getPool ( ) ) ; out . printf ( "<td>%s</td>\n" , jobInfo . getPriority ( ) ) ; out . printf ( "<td>%s</td>\n" , jobInfo . getReason ( ) ) ; out . printf ( "<td>%d</td>\n" , jobInfo . getHardAdmissionPosition ( ) ) ; out . printf ( "<td>%d</td>\n" , jobInfo . getEstimatedHardAdmissionEntranceSecs ( ) ) ; out . print ( "</tr>\n" ) ; } out . print ( "</tbody></table>\n" ) ; } | Print a view of not admitted jobs to the given output writer .
32,569 | public boolean addSourceFile ( FileSystem fs , PolicyInfo info , FileStatus src , RaidState . Checker checker , long now , int targetReplication ) throws IOException { RaidState state = checker . check ( info , src , now , false ) ; Counters counters = stateToSourceCounters . get ( state ) ; counters . inc ( src ) ; if ( state == RaidState . RAIDED ) { incRaided ( src ) ; long paritySize = computeParitySize ( src , targetReplication ) ; estimatedParitySize += paritySize ; estimatedDoneParitySize += paritySize ; estimatedDoneSourceSize += src . getLen ( ) * targetReplication ; return false ; } if ( state == RaidState . NOT_RAIDED_BUT_SHOULD ) { estimatedDoneParitySize += computeParitySize ( src , targetReplication ) ; estimatedDoneSourceSize += src . getLen ( ) * targetReplication ; return true ; } return false ; } | Collect the statistics of a source file . Return true if the file should be raided but not . |
32,570 | public long getSaving ( Configuration conf ) { try { DFSClient dfs = ( ( DistributedFileSystem ) FileSystem . get ( conf ) ) . getClient ( ) ; Counters raidedCounters = stateToSourceCounters . get ( RaidState . RAIDED ) ; long physical = raidedCounters . getNumBytes ( ) + parityCounters . getNumBytes ( ) ; long logical = raidedCounters . getNumLogical ( ) ; return logical * dfs . getDefaultReplication ( ) - physical ; } catch ( Exception e ) { return - 1 ; } } | Get the saving of this code in bytes |
32,571 | public long getDoneSaving ( Configuration conf ) { try { DFSClient dfs = ( ( DistributedFileSystem ) FileSystem . get ( conf ) ) . getClient ( ) ; Counters raidedCounters = stateToSourceCounters . get ( RaidState . RAIDED ) ; Counters shouldRaidCounters = stateToSourceCounters . get ( RaidState . NOT_RAIDED_BUT_SHOULD ) ; long physical = estimatedDoneSourceSize + estimatedDoneParitySize ; long logical = raidedCounters . getNumLogical ( ) + shouldRaidCounters . getNumLogical ( ) ; return logical * dfs . getDefaultReplication ( ) - physical ; } catch ( Exception e ) { return - 1 ; } } | Get the estimated saving of this code in bytes when RAIDing is done |
32,572 | public static synchronized void removeDefaultResource ( String name ) { if ( defaultResources . contains ( name ) ) { defaultResources . remove ( name ) ; for ( Configuration conf : REGISTRY . keySet ( ) ) { if ( conf . loadDefaults ) { conf . reloadConfiguration ( ) ; } } } } | Remove default resource |
32,573 | private void loadEntireJsonObject ( JSONObject json ) throws JSONException { Iterator < ? > it = json . keys ( ) ; while ( it . hasNext ( ) ) { Object obj = it . next ( ) ; if ( ! ( obj instanceof String ) ) { LOG . warn ( "Object not instance of string : " + obj + " skipping" ) ; continue ; } String key = ( String ) obj ; JSONObject partition = json . getJSONObject ( key ) ; loadJsonResource ( partition , properties , key ) ; } } | Loads an entire json configuration object which would mostly contain many sub - sections like core - site . xml and hdfs - site . xml |
32,574 | public String xmlToThrift ( String name ) { name = name . replace ( "-custom.xml" , "" ) ; name = name . replace ( ".xml" , "" ) ; name = name . replace ( "-" , "_" ) ; return name ; } | Maps xml configs to their respective key names in the file config . materialized_JSON |
32,575 | public JSONObject instantiateJsonObject ( InputStream in ) throws IOException , JSONException { BufferedReader reader = new BufferedReader ( new InputStreamReader ( in ) ) ; StringBuffer contents = new StringBuffer ( ) ; String text = null ; while ( ( text = reader . readLine ( ) ) != null ) { contents . append ( text ) . append ( System . getProperty ( "line.separator" ) ) ; } in . close ( ) ; JSONObject json = new JSONObject ( contents . toString ( ) ) ; return json ; } | Given an input stream read in the contents of the file and instantiate a JSON object if it exists . |
32,576 | public JSONObject getJsonConfig ( String name ) throws IOException , JSONException { if ( name . endsWith ( ".xml" ) ) { URL url = getResource ( MATERIALIZEDJSON ) ; if ( url != null ) { InputStream in = url . openStream ( ) ; if ( in != null ) { JSONObject json = instantiateJsonObject ( in ) ; if ( json . has ( xmlToThrift ( name ) ) ) { return json . getJSONObject ( xmlToThrift ( name ) ) ; } } } } return null ; } | Given a xml config file specified as a string return the corresponding json object if it exists . |
32,577 | public JSONObject getJsonConfig ( Path name ) throws IOException , JSONException { String pathString = name . toUri ( ) . getPath ( ) ; String xml = new Path ( pathString ) . getName ( ) ; File jsonFile = new File ( pathString . replace ( xml , MATERIALIZEDJSON ) ) . getAbsoluteFile ( ) ; if ( jsonFile . exists ( ) ) { InputStream in = new BufferedInputStream ( new FileInputStream ( jsonFile ) ) ; if ( in != null ) { JSONObject json = instantiateJsonObject ( in ) ; if ( json . has ( xmlToThrift ( xml ) ) ) { return json . getJSONObject ( xmlToThrift ( xml ) ) ; } } } return null ; } | Given a xml config file specified by a path return the corresponding json object if it exists . |
32,578 | private Object convertFile ( Object name ) throws IOException , JSONException { if ( name instanceof String ) { String file = ( String ) name ; JSONObject json = getJsonConfig ( file ) ; if ( json != null ) { return json ; } } else if ( name instanceof Path ) { Path file = ( Path ) name ; JSONObject json = getJsonConfig ( file ) ; if ( json != null ) { return json ; } } return name ; } | Given a xml config file return the corresponding json object if it exists . |
32,579 | private void loadJsonResource ( JSONObject json , Properties properties , Object name ) throws JSONException { Iterator < ? > keys = json . keys ( ) ; while ( keys . hasNext ( ) ) { Object obj = keys . next ( ) ; if ( ! ( obj instanceof String ) ) { LOG . warn ( "Object not instance of string : " + obj + " skipping" ) ; continue ; } String key = ( String ) obj ; String keyUnderscoresToDots = key . replace ( "_" , "." ) ; keyUnderscoresToDots = keyUnderscoresToDots . replace ( ".." , "_" ) ; if ( ! json . isNull ( key ) ) { Object value = json . get ( key ) ; String stringVal = "" ; if ( value instanceof String ) { stringVal = ( String ) value ; } else if ( value instanceof Integer ) { stringVal = new Integer ( ( Integer ) value ) . toString ( ) ; } else if ( value instanceof Long ) { stringVal = new Long ( ( Long ) value ) . toString ( ) ; } else if ( value instanceof Double ) { stringVal = new Double ( ( Double ) value ) . toString ( ) ; } else if ( value instanceof Boolean ) { stringVal = new Boolean ( ( Boolean ) value ) . toString ( ) ; } else if ( value instanceof JSONObject ) { loadJsonResource ( ( JSONObject ) value , properties , name ) ; continue ; } else { LOG . warn ( "unsupported value in json object: " + value ) ; } if ( ! finalParameters . contains ( keyUnderscoresToDots ) ) { properties . setProperty ( keyUnderscoresToDots , stringVal ) ; updatingResource . put ( keyUnderscoresToDots , name . toString ( ) ) ; } else { LOG . warn ( name + ":a attempt to override final parameter: " + keyUnderscoresToDots + "; Ignoring." ) ; } } } } | Helper function to loadResource that adds resources to the properties parameter by parsing through a json object . |
32,580 | void writeTo ( DataOutputStream out ) throws IOException { int length = size ( ) - 4 ; out . writeInt ( length ) ; out . writeShort ( codecNameUTF8 . length ) ; out . write ( codecNameUTF8 ) ; if ( codecNameUTF8 . length == 0 ) { out . writeLong ( crc32Value ) ; } out . write ( storedData . getData ( ) , 0 , storedData . getLength ( ) ) ; } | Write this data segment into an OutputStream . |
32,581 | static byte [ ] getCodecNameUTF8 ( String compressionCodecName ) { byte [ ] codecNameBytes = CODEC_NAME_CACHE . get ( compressionCodecName ) ; if ( codecNameBytes == null ) { try { codecNameBytes = compressionCodecName . getBytes ( "UTF-8" ) ; } catch ( UnsupportedEncodingException e ) { throw new RuntimeException ( e ) ; } CODEC_NAME_CACHE . put ( compressionCodecName , codecNameBytes ) ; } return codecNameBytes ; } | Convert from String to UTF8 byte array . |
32,582 | private int sendChunks ( ByteBuffer pkt , int maxChunks , OutputStream out ) throws IOException { int len = ( int ) Math . min ( endOffset - offset , ( ( ( long ) bytesPerChecksum ) * ( ( long ) maxChunks ) ) ) ; if ( len > bytesPerChecksum && len % bytesPerChecksum != 0 ) { len -= len % bytesPerChecksum ; } if ( len == 0 ) { return 0 ; } int numChunks = ( len + bytesPerChecksum - 1 ) / bytesPerChecksum ; int packetLen = len + numChunks * checksumSize + 4 ; pkt . clear ( ) ; pkt . putInt ( packetLen ) ; if ( pktIncludeVersion ) { pkt . putInt ( packetVersion ) ; } pkt . putLong ( offset ) ; pkt . putLong ( seqno ) ; pkt . put ( ( byte ) ( ( offset + len >= endOffset ) ? 1 : 0 ) ) ; pkt . putInt ( len ) ; int checksumOff = pkt . position ( ) ; byte [ ] buf = pkt . array ( ) ; blockReader . sendChunks ( out , buf , offset , checksumOff , numChunks , len , crcUpdater , packetVersion ) ; if ( throttler != null ) { throttler . throttle ( packetLen ) ; } return len ; } | Sends upto maxChunks chunks of data . |
32,583 | private void insertCurrentStoredBlockIntoList ( ) { if ( currentStoredBlock == null || currentStoredBlockIndex < 0 ) return ; if ( head == null ) { head = currentStoredBlock ; headIndex = currentStoredBlockIndex ; tail = currentStoredBlock ; tailIndex = currentStoredBlockIndex ; head . setNext ( currentStoredBlockIndex , null ) ; head . setPrevious ( currentStoredBlockIndex , null ) ; } else { head . setPrevious ( headIndex , currentStoredBlock ) ; currentStoredBlock . setNext ( currentStoredBlockIndex , head ) ; head = currentStoredBlock ; headIndex = currentStoredBlockIndex ; } addedBlocks ++ ; resetCurrentStoredBlock ( ) ; } | Insert the current stored block into the local list of blocks belonging to the datanode descriptor . |
32,584 | static void processReport ( FSNamesystem namesystem , Collection < Block > toRetry , BlockListAsLongs newReport , DatanodeDescriptor node , ExecutorService initialBlockReportExecutor ) throws IOException { int numShards = Math . min ( namesystem . parallelProcessingThreads , ( ( newReport . getNumberOfBlocks ( ) + namesystem . parallelBRblocksPerShard - 1 ) / namesystem . parallelBRblocksPerShard ) ) ; List < Future < List < Block > > > workers = new ArrayList < Future < List < Block > > > ( numShards ) ; for ( int i = 0 ; i < numShards ; i ++ ) { workers . add ( initialBlockReportExecutor . submit ( new InitialReportWorker ( newReport , i , numShards , node , namesystem . getNameNode ( ) . shouldRetryAbsentBlocks ( ) , namesystem ) ) ) ; } try { for ( Future < List < Block > > worker : workers ) { if ( namesystem . getNameNode ( ) . shouldRetryAbsentBlocks ( ) ) { toRetry . addAll ( worker . get ( ) ) ; } else { worker . get ( ) ; } } } catch ( ExecutionException e ) { LOG . warn ( "Parallel report failed" , e ) ; throw new IOException ( e ) ; } catch ( InterruptedException e ) { throw new IOException ( "Interruption" , e ) ; } } | Processes a single initial block reports by spawning multiple threads to handle insertion to the blocks map . Each thread stores the inserted blocks in a local list and at the end the list are concatenated for a single datanode descriptor . |
32,585 | private long computeRenewalPeriod ( ) { long hardLeaseLimit = conf . getLong ( FSConstants . DFS_HARD_LEASE_KEY , FSConstants . LEASE_HARDLIMIT_PERIOD ) ; long softLeaseLimit = conf . getLong ( FSConstants . DFS_SOFT_LEASE_KEY , FSConstants . LEASE_SOFTLIMIT_PERIOD ) ; long renewal = Math . min ( hardLeaseLimit , softLeaseLimit ) / 2 ; long hdfsTimeout = Client . getTimeout ( conf ) ; if ( hdfsTimeout > 0 ) { renewal = Math . min ( renewal , hdfsTimeout / 2 ) ; } return renewal ; } | Computes the renewal period for the lease . |
32,586 | public static InetSocketAddress createWithResolveRetry ( String hostname , int port ) { return createWithResolveRetry ( hostname , port , DEFAULT_DELAY_MILLIS , DEFAULT_MAX_ATTEMPTS ) ; } | Utility function to create an InetSocketAddress that has been resolved . Retries once and sleeps 100ms between the failure and the retry |
32,587 | public static InetSocketAddress createWithResolveRetry ( String hostname , int port , int delayMillis , int maxAttempt ) { InetSocketAddress socketAddress ; int attempts = 0 ; do { socketAddress = new InetSocketAddress ( hostname , port ) ; if ( socketAddress . isUnresolved ( ) ) { attempts ++ ; LOG . info ( String . format ( "failed to resolve host %s, attempt %d" , hostname , attempts ) ) ; try { Thread . sleep ( delayMillis ) ; } catch ( InterruptedException e ) { Thread . currentThread ( ) . interrupt ( ) ; } } else if ( attempts > 0 ) { LOG . info ( String . format ( "successful resolution on attempt %d" , attempts ) ) ; } } while ( socketAddress . isUnresolved ( ) && attempts < maxAttempt ) ; return socketAddress ; } | Utility function to create an InetSocketAddress that has been resolved . Retries with a small sleep in between |
32,588 | public int run ( String [ ] args ) throws Exception { int exitCode = 0 ; exitCode = init ( args ) ; if ( exitCode != 0 ) { return exitCode ; } genDirStructure ( ) ; output ( new File ( outDir , DIR_STRUCTURE_FILE_NAME ) ) ; genFileStructure ( ) ; outputFiles ( new File ( outDir , FILE_STRUCTURE_FILE_NAME ) ) ; return exitCode ; } | The main function first parses the command line arguments then generates in - memory directory structure and outputs to a file last generates in - memory files and outputs them to a file . |
32,589 | private List < INode > getLeaves ( ) { List < INode > leaveDirs = new ArrayList < INode > ( ) ; root . getLeaves ( leaveDirs ) ; return leaveDirs ; } | Collects leaf nodes in the tree |
32,590 | private void genFileStructure ( ) { List < INode > leaves = getLeaves ( ) ; int totalLeaves = leaves . size ( ) ; for ( int i = 0 ; i < numOfFiles ; i ++ ) { int leaveNum = r . nextInt ( totalLeaves ) ; double fileSize ; do { fileSize = r . nextGaussian ( ) + avgFileSize ; } while ( fileSize < 0 ) ; leaves . get ( leaveNum ) . addChild ( new FileINode ( FILE_NAME_PREFIX + i , fileSize ) ) ; } } | Decides where to place all the files and its length . It first collects all empty directories in the tree . For each file it randomly chooses an empty directory to place the file . The file s length is generated using Gaussian distribution . |
32,591 | private void output ( File outFile ) throws FileNotFoundException { System . out . println ( "Printing to " + outFile . toString ( ) ) ; PrintStream out = new PrintStream ( outFile ) ; root . output ( out , null ) ; out . close ( ) ; } | Output directory structure to a file ; each line of the file contains the directory name . Only empty directory names are printed .
32,592 | public static String getReasoning ( final BlockedAdmissionReason reason , final int reasonLimit , final int reasonActualValue , final int hardAdmissionPosition , JobAdmissionWaitInfo jobAdmissionWaitInfo ) { if ( reason == BlockedAdmissionReason . HARD_CLUSTER_WIDE_MAX_TASKS_EXCEEDED ) { if ( jobAdmissionWaitInfo == null ) { return reason . toString ( ) + "." ; } else { StringBuffer sb = new StringBuffer ( ) ; sb . append ( reason . toString ( ) + ". In order to protect the jobtracker " + "from exceeding hard memory limits based on the number of " + "total tracked tasks, the cluster is now in cluster-wide " + "hard admission control and accepts jobs on a first come, " + "first served (FIFO) basis. Your job will be admitted " + "according to this policy." ) ; if ( jobAdmissionWaitInfo . getAverageCount ( ) > 0 ) { sb . append ( " The past " + jobAdmissionWaitInfo . getAverageCount ( ) + " jobs admitted while in hard admission control were " + "added in an average of " + jobAdmissionWaitInfo . getAverageWaitMsecsPerHardAdmissionJob ( ) + " msecs, giving this job a rough estimated wait time of " + ( jobAdmissionWaitInfo . getAverageWaitMsecsPerHardAdmissionJob ( ) * ( hardAdmissionPosition + 1 ) ) + " msecs." ) ; } return sb . toString ( ) ; } } else if ( reason == BlockedAdmissionReason . SOFT_CLUSTER_WIDE_MAX_TASKS_EXCEEEDED ) { return reason . toString ( ) + "." ; } else { return reason . toString ( ) + " " + reasonActualValue + " exceeds " + reasonLimit + "." ; } } | Compose the reason message . |
32,593 | public int getPartition ( K key , V value , int numPartitions ) { Integer result = cache . get ( ) ; if ( result == null ) { return part . getPartition ( key , value , numPartitions ) ; } else { return result ; } } | If a partition result was set manually return it . Otherwise we call the Java partitioner . |
32,594 | boolean isParityFile ( Path p , Codec c ) { return isParityFile ( p . toUri ( ) . getPath ( ) , c ) ; } | Is the path a parity file of a given Codec? |
32,595 | boolean reconstructFile ( Path srcPath , Context context ) throws IOException , InterruptedException { Progressable progress = context ; if ( progress == null ) { progress = RaidUtils . NULL_PROGRESSABLE ; } FileSystem fs = srcPath . getFileSystem ( getConf ( ) ) ; FileStatus srcStat = null ; try { srcStat = fs . getFileStatus ( srcPath ) ; } catch ( FileNotFoundException ex ) { return false ; } if ( RaidNode . isParityHarPartFile ( srcPath ) ) { return processParityHarPartFile ( srcPath , progress ) ; } for ( Codec codec : Codec . getCodecs ( ) ) { if ( isParityFile ( srcPath , codec ) ) { Decoder decoder = new Decoder ( getConf ( ) , codec ) ; decoder . connectToStore ( srcPath ) ; return processParityFile ( srcPath , decoder , context ) ; } } for ( Codec codec : Codec . getCodecs ( ) ) { ParityFilePair ppair = ParityFilePair . getParityFile ( codec , srcStat , getConf ( ) ) ; if ( ppair != null ) { Decoder decoder = new Decoder ( getConf ( ) , codec ) ; decoder . connectToStore ( srcPath ) ; return processFile ( srcPath , ppair , decoder , false , context ) ; } } for ( Codec codec : Codec . getCodecs ( ) ) { if ( ! codec . isDirRaid ) { continue ; } try { Decoder decoder = new Decoder ( getConf ( ) , codec ) ; decoder . connectToStore ( srcPath ) ; if ( processFile ( srcPath , null , decoder , true , context ) ) { return true ; } } catch ( Exception ex ) { LogUtils . logRaidReconstructionMetrics ( LOGRESULTS . FAILURE , 0 , codec , srcPath , - 1 , LOGTYPES . OFFLINE_RECONSTRUCTION_USE_STRIPE , fs , ex , context ) ; } } return false ; } | Fix a file , report progress .
32,596 | void sortLostFiles ( List < String > files ) { Comparator < String > comp = new Comparator < String > ( ) { public int compare ( String p1 , String p2 ) { Codec c1 = null ; Codec c2 = null ; for ( Codec codec : Codec . getCodecs ( ) ) { if ( isParityFile ( p1 , codec ) ) { c1 = codec ; } else if ( isParityFile ( p2 , codec ) ) { c2 = codec ; } } if ( c1 == null && c2 == null ) { return 0 ; } if ( c1 == null && c2 != null ) { return - 1 ; } if ( c2 == null && c1 != null ) { return 1 ; } return c2 . priority - c1 . priority ; } } ; Collections . sort ( files , comp ) ; } | Sorts source files ahead of parity files . |
32,597 | protected DistributedFileSystem getDFS ( Path p ) throws IOException { FileSystem fs = p . getFileSystem ( getConf ( ) ) ; DistributedFileSystem dfs = null ; if ( fs instanceof DistributedFileSystem ) { dfs = ( DistributedFileSystem ) fs ; } else if ( fs instanceof FilterFileSystem ) { FilterFileSystem ffs = ( FilterFileSystem ) fs ; if ( ffs . getRawFileSystem ( ) instanceof DistributedFileSystem ) { dfs = ( DistributedFileSystem ) ffs . getRawFileSystem ( ) ; } } return dfs ; } | Returns a DistributedFileSystem hosting the path supplied . |
32,598 | void checkLostBlocks ( List < Block > blocksLostChecksum , List < Block > blocksLostStripe , Path p , Codec codec ) throws IOException { StringBuilder message = new StringBuilder ( ) ; if ( blocksLostChecksum . size ( ) > 0 ) { message . append ( "Lost " + blocksLostChecksum . size ( ) + " checksums in blocks:" ) ; for ( Block blk : blocksLostChecksum ) { message . append ( " " ) ; message . append ( blk . toString ( ) ) ; } } if ( blocksLostStripe . size ( ) > 0 ) { message . append ( "Lost " + blocksLostStripe . size ( ) + " stripes in blocks:" ) ; for ( Block blk : blocksLostStripe ) { message . append ( " " ) ; message . append ( blk . toString ( ) ) ; } } if ( message . length ( ) == 0 ) return ; message . append ( " in file " + p ) ; throw new IOException ( message . toString ( ) ) ; } | Throw exceptions for blocks with lost checksums or stripes |
32,599 | boolean processParityHarPartFile ( Path partFile , Progressable progress ) throws IOException { LOG . info ( "Processing parity HAR file " + partFile ) ; DistributedFileSystem dfs = getDFS ( partFile ) ; FileStatus partFileStat = dfs . getFileStatus ( partFile ) ; long partFileBlockSize = partFileStat . getBlockSize ( ) ; LOG . info ( partFile + " has block size " + partFileBlockSize ) ; HarIndex harIndex = HarIndex . getHarIndex ( dfs , partFile ) ; String uriPath = partFile . toUri ( ) . getPath ( ) ; int numBlocksReconstructed = 0 ; List < LocatedBlockWithMetaInfo > lostBlocks = lostBlocksInFile ( dfs , uriPath , partFileStat ) ; if ( lostBlocks . size ( ) == 0 ) { LOG . warn ( "Couldn't find any lost blocks in HAR file " + partFile + ", ignoring..." ) ; return false ; } for ( LocatedBlockWithMetaInfo lb : lostBlocks ) { Block lostBlock = lb . getBlock ( ) ; long lostBlockOffset = lb . getStartOffset ( ) ; File localBlockFile = File . createTempFile ( lostBlock . getBlockName ( ) , ".tmp" ) ; localBlockFile . deleteOnExit ( ) ; try { processParityHarPartBlock ( dfs , partFile , lostBlockOffset , partFileStat , harIndex , localBlockFile , progress ) ; computeMetadataAndSendReconstructedBlock ( localBlockFile , lostBlock , localBlockFile . length ( ) , lb . getLocations ( ) , lb . getDataProtocolVersion ( ) , lb . getNamespaceID ( ) , progress ) ; numBlocksReconstructed ++ ; } finally { localBlockFile . delete ( ) ; } progress . progress ( ) ; } LOG . info ( "Reconstructed " + numBlocksReconstructed + " blocks in " + partFile ) ; return true ; } | Reads through a parity HAR part file reconstructing lost blocks on the way . A HAR block can contain many file blocks as long as the HAR part file block size is a multiple of the file block size . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.