idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
33,400 | private static double totalShareWithRatio ( Collection < ? extends Schedulable > schedulables , double weightToShareRatio ) { double totalShare = 0 ; for ( Schedulable schedulable : schedulables ) { totalShare += shareWithRatio ( schedulable , weightToShareRatio ) ; } return totalShare ; } | Compute the total share of the schedulables given a weightToShareRatio |
33,401 | private static double shareWithRatio ( Schedulable schedulable , double weightToShareRatio ) { double share = schedulable . getWeight ( ) * weightToShareRatio ; int min = schedulable . getMinimum ( ) ; int max = schedulable . getMaximum ( ) ; int requested = schedulable . getRequested ( ) ; share = Math . max ( min , share ) ; share = Math . min ( max , share ) ; share = Math . min ( requested , share ) ; return share ; } | Get the share of the schedulable given a weightToShareRatio . This takes into account the min and the max allocation and is used to compute the weightToShareRatio globally |
33,402 | private static double assignShareIfUnderAllocated ( double totalShare , final Collection < ? extends Schedulable > schedulables ) { double totalMinDemand = 0 ; for ( Schedulable schedulable : schedulables ) { schedulable . share = 0 ; totalMinDemand += Math . min ( schedulable . getRequested ( ) , schedulable . getMinimum ( ) ) ; } if ( ( totalMinDemand > 0 ) && ( totalMinDemand >= totalShare ) ) { distributeShareMin ( schedulables ) ; } return totalShare - totalMinDemand ; } | Assign fair share if total share < min demand . |
33,403 | private static TreeMap < Integer , Vector < Schedulable > > generatePriorityGroupedSchedulables ( final Collection < ? extends Schedulable > schedulables ) { TreeMap < Integer , Vector < Schedulable > > prioritizedSchedulableMap = new TreeMap < Integer , Vector < Schedulable > > ( ) ; for ( Schedulable schedulable : schedulables ) { if ( ! prioritizedSchedulableMap . containsKey ( schedulable . getPriority ( ) ) ) { prioritizedSchedulableMap . put ( schedulable . getPriority ( ) , new Vector < Schedulable > ( ) ) ; } prioritizedSchedulableMap . get ( schedulable . getPriority ( ) ) . add ( schedulable ) ; } return prioritizedSchedulableMap ; } | Group a collection of schedulables into priority groups sorted by priority . |
33,404 | private static void distributeSharePriority ( double total , final Collection < ? extends Schedulable > schedulables ) { double residualShare = assignShareIfUnderAllocated ( total , schedulables ) ; if ( residualShare <= 0.0 ) { return ; } TreeMap < Integer , Vector < Schedulable > > prioritizedSchedulableMap = generatePriorityGroupedSchedulables ( schedulables ) ; trickleShareDownPriorityGroups ( residualShare , prioritizedSchedulableMap ) ; } | Distribute the total share among the list of schedulables according to the PRIORITY model . Note that the eventual intent is to determine if preemption is necessary . So we need to assign the most ideal share on each schedulable beyond which preemption is necessary . Finds a way to distribute the share in such a way that all the min and max reservations of the schedulables are satisfied . In the PRIORITY scheduling MINs are always granted to all pools . Therefore irrespective of the demand of a pool its share according to the algorithm will be at least MIN . |
33,405 | private static void distributeShareMin ( Collection < ? extends Schedulable > schedulableVector ) { for ( Schedulable schedulable : schedulableVector ) { schedulable . share += schedulable . getMinimum ( ) ; } } | Increment each of the schedulables share by the minimum demand . Note that each pool has to be guaranteed at least its MIN value according to PRIORITY scheduling . |
33,406 | private static void distributeShareMax ( Collection < ? extends Schedulable > schedulableVector ) { for ( Schedulable schedulable : schedulableVector ) { double minShare = Math . max ( schedulable . getMinimum ( ) , schedulable . getRequested ( ) ) ; schedulable . share += Math . min ( schedulable . getMaximum ( ) , minShare ) ; } } | Increment each of the schedulables share by the lesser of its min demand or its maximum . |
33,407 | private int parseMonth ( String month ) { for ( int i = 0 ; i < months . length ; i ++ ) if ( months [ i ] . startsWith ( month ) ) return i ; return - 1 ; } | Convert the name of a month to the corresponding int value . |
33,408 | long writeFixedBlock ( FSDataInputStream [ ] inputs , int [ ] erasedLocations , int erasedLocationToFix , long limit , OutputStream out , Progressable reporter , ParallelStreamReader parallelReader , CRC32 crc ) throws IOException { LOG . info ( "Need to write " + limit + " bytes for erased location index " + erasedLocationToFix ) ; if ( crc != null ) { crc . reset ( ) ; } int [ ] tmp = new int [ inputs . length ] ; int [ ] decoded = new int [ erasedLocations . length ] ; long written ; for ( written = 0 ; written < limit ; ) { erasedLocations = readFromInputs ( inputs , erasedLocations , limit , reporter , parallelReader ) ; if ( decoded . length != erasedLocations . length ) { decoded = new int [ erasedLocations . length ] ; } int toWrite = ( int ) Math . min ( ( long ) bufSize , limit - written ) ; int partSize = ( int ) Math . ceil ( bufSize * 1.0 / parallelism ) ; try { long startTime = System . currentTimeMillis ( ) ; for ( int i = 0 ; i < parallelism ; i ++ ) { decodeOps . acquire ( 1 ) ; int start = i * partSize ; int count = Math . min ( bufSize - start , partSize ) ; parallelDecoder . execute ( new DecodeOp ( readBufs , writeBufs , start , count , erasedLocations , reedSolomonCode [ i ] ) ) ; } decodeOps . acquire ( parallelism ) ; decodeOps . release ( parallelism ) ; decodeTime += ( System . currentTimeMillis ( ) - startTime ) ; } catch ( InterruptedException e ) { throw new IOException ( "Interrupted while waiting for read result" ) ; } for ( int i = 0 ; i < erasedLocations . length ; i ++ ) { if ( erasedLocations [ i ] == erasedLocationToFix ) { out . write ( writeBufs [ i ] , 0 , toWrite ) ; if ( crc != null ) { crc . update ( writeBufs [ i ] , 0 , toWrite ) ; } written += toWrite ; break ; } } } return written ; } | Decode the inputs provided and write to the output . |
33,409 | public static Configuration mergeConfiguration ( URI uri , Configuration conf ) throws IOException { try { Long lastBadAccess = badURIs . get ( uri . getHost ( ) ) ; if ( lastBadAccess != null ) { if ( System . currentTimeMillis ( ) - lastBadAccess < BAD_URI_EXPIRY ) { return conf ; } else { badURIs . remove ( uri . getHost ( ) ) ; } } boolean lookupLogical = conf . getBoolean ( "dfs.client.configerator.logical.lookup.enabled" , false ) ; Properties props = new Properties ( System . getProperties ( ) ) ; props . setProperty ( "dfs.client.configerator.logical.lookup.enabled" , lookupLogical + "" ) ; String configDir = conf . get ( "dfs.client.configerator.dir" ) ; if ( configDir != null ) { props . setProperty ( "dfs.client.configerator.dir" , configDir ) ; } String json = getInstance ( uri , conf ) . getConfiguration ( uri . getHost ( ) , conf . getInt ( "hdfs.retrieve.client_configuration_timeout" , 3000 ) , props ) ; if ( json == null ) { LOG . info ( "Client configuration lookup disabled/failed. " + "Using default configuration" ) ; return conf ; } Configuration newConf = new Configuration ( conf ) ; JSONObject jsonObj = new JSONObject ( json ) ; Configuration clientConf = new Configuration ( jsonObj ) ; Iterator < Map . Entry < String , String > > it = clientConf . iterator ( ) ; while ( it . hasNext ( ) ) { Map . Entry < String , String > entry = it . next ( ) ; String key = entry . getKey ( ) ; String val = entry . getValue ( ) ; newConf . set ( key , val ) ; } newConf . setBoolean ( "client.configuration.lookup.done" , true ) ; return newConf ; } catch ( Throwable t ) { badURIs . put ( uri . getHost ( ) , System . currentTimeMillis ( ) ) ; LOG . info ( "Problem retreiving client side configuration " + ". Using default configuration instead" , t ) ; return conf ; } } | Retrieves a modified version of the configuration from the supplied configuration . 
This is used to override the configuration supplied with the client side configuration retrieved from a different source . |
33,410 | public static JVMId forName ( String str ) throws IllegalArgumentException { if ( str == null ) return null ; try { String [ ] parts = str . split ( "_" ) ; if ( parts . length == 5 ) { if ( parts [ 0 ] . equals ( JVM ) ) { boolean isMap = false ; if ( parts [ 3 ] . equals ( "m" ) ) isMap = true ; else if ( parts [ 3 ] . equals ( "r" ) ) isMap = false ; else throw new Exception ( ) ; return new JVMId ( parts [ 1 ] , Integer . parseInt ( parts [ 2 ] ) , isMap , Integer . parseInt ( parts [ 4 ] ) ) ; } } } catch ( Exception ex ) { } throw new IllegalArgumentException ( "TaskId string : " + str + " is not properly formed" ) ; } | Construct a JVMId object from given string |
33,411 | private String [ ] readFile ( String fileName ) throws IOException { ArrayList < String > result = new ArrayList < String > ( ) ; FileReader fReader = new FileReader ( fileName ) ; BufferedReader bReader = new BufferedReader ( fReader ) ; while ( true ) { String line = bReader . readLine ( ) ; if ( line == null ) { break ; } result . add ( line ) ; } bReader . close ( ) ; fReader . close ( ) ; return ( String [ ] ) result . toArray ( new String [ result . size ( ) ] ) ; } | Read a file line by line |
33,412 | protected String [ ] getPS ( ) { ShellCommandExecutor shellExecutor = new ShellCommandExecutor ( CMD ) ; try { shellExecutor . execute ( ) ; } catch ( IOException e ) { LOG . error ( StringUtils . stringifyException ( e ) ) ; return null ; } return shellExecutor . getOutput ( ) . split ( "\n" ) ; } | Execute ps - eo pid ppid pcpu rss command |
33,413 | private String [ ] [ ] parsePS ( String [ ] psStrings ) { String [ ] [ ] result = new String [ psStrings . length - 1 ] [ NUM_FIELDS ] ; for ( int i = 1 ; i < psStrings . length ; i ++ ) { Matcher matcher = psPattern . matcher ( psStrings [ i ] ) ; if ( matcher . find ( ) ) { for ( int j = 0 ; j < NUM_FIELDS ; j ++ ) { result [ i - 1 ] [ j ] = matcher . group ( j + 1 ) ; } } } return result ; } | Parse PS results into fields |
33,414 | private double percentageToGHz ( double cpuUsage ) { cpuUsage /= 100 ; cpuUsage /= ttUtilization . getNumCpu ( ) ; cpuUsage *= ttUtilization . getCpuTotalGHz ( ) ; return cpuUsage ; } | ps - eo pcpu gives per core % . We convert it to GHz |
33,415 | private double [ ] getSubProcessUsage ( String pid , Map < String , String [ ] > pidToContent , Map < String , LinkedList < String > > pidToChildPid ) { double cpuMemUsage [ ] = new double [ 2 ] ; Queue < String > pidQueue = new LinkedList < String > ( ) ; pidQueue . add ( pid ) ; while ( ! pidQueue . isEmpty ( ) ) { pid = pidQueue . poll ( ) ; for ( String child : pidToChildPid . get ( pid ) ) { pidQueue . add ( child ) ; } String [ ] psContent = pidToContent . get ( pid ) ; double cpuUsage = Double . parseDouble ( psContent [ PCPU ] ) ; cpuUsage = percentageToGHz ( cpuUsage ) ; double memUsage = Double . parseDouble ( psContent [ RSS ] ) ; memUsage /= 1000000d ; cpuMemUsage [ 0 ] += cpuUsage ; cpuMemUsage [ 1 ] += memUsage ; } return cpuMemUsage ; } | A function computes the Memory and CPU usage of all subprocess |
33,416 | void clear ( ) { for ( int i = 0 ; i < LEVEL ; i ++ ) { priorityQueues . get ( i ) . clear ( ) ; } raidQueue . clear ( ) ; } | Empty the queues . |
33,417 | synchronized int size ( int priority ) { if ( priority < 0 || priority >= LEVEL ) { throw new IllegalArgumentException ( "Unsupported priority: " + priority ) ; } return priorityQueues . get ( priority ) . size ( ) ; } | Return the number of under replication blocks of priority |
33,418 | synchronized int getSize ( int priority ) { int size = 0 ; for ( int i = priority ; i < LEVEL ; i ++ ) { size += priorityQueues . get ( i ) . size ( ) ; } return size ; } | get the number of under replicated blocks with equal or higher priority |
33,419 | public void removeNode ( ClusterNode node ) { String host = node . getHost ( ) ; NodeContainer container = hostToRunnableNode . get ( host ) ; if ( container != null ) { if ( container . removeNode ( node ) ) { runnableNodeCount -- ; } if ( container . isEmpty ( ) ) { hostToRunnableNode . remove ( host ) ; } } Node rack = topologyCache . getNode ( host ) . getParent ( ) ; container = rackToRunnableNode . get ( rack ) ; if ( container != null ) { container . removeNode ( node ) ; if ( container . isEmpty ( ) ) { rackToRunnableNode . remove ( rack ) ; } } } | Remove a node from the snapshot . |
33,420 | void visit ( ImageElement element , int value ) throws IOException { visit ( element , Integer . toString ( value ) ) ; } | Convenience methods to automatically convert numeric value types to strings |
33,421 | void visitEnclosingElement ( ImageElement element , ImageElement key , int value ) throws IOException { visitEnclosingElement ( element , key , Integer . toString ( value ) ) ; } | Convenience methods to automatically convert value types to strings |
33,422 | public void addSession ( String id , Session session ) { for ( SchedulerForType scheduleThread : schedulersForTypes . values ( ) ) { scheduleThread . addSession ( id , session ) ; } } | Add a session for scheduling . |
33,423 | public void start ( ) { for ( Thread schedulerForType : schedulersForTypes . values ( ) ) { LOG . info ( "Starting " + schedulerForType . getName ( ) ) ; schedulerForType . start ( ) ; } configManager . start ( ) ; } | Start the scheduling as well as the reloadable configuration manager . |
33,424 | public void close ( ) { for ( SchedulerForType scheduleThread : schedulersForTypes . values ( ) ) { scheduleThread . close ( ) ; } for ( Thread scheduleThread : schedulersForTypes . values ( ) ) { Utilities . waitThreadTermination ( scheduleThread ) ; } configManager . close ( ) ; } | Stop the scheduling and disable the reloadable configuration manager . |
33,425 | public Map < PoolInfo , PoolInfoMetrics > getPoolInfoMetrics ( ResourceType type ) { return schedulersForTypes . get ( type ) . getPoolInfoMetrics ( ) ; } | Get an unmodifiable snapshot of the mapping from pool info to associated metrics . |
33,426 | public List < PoolInfo > getPoolInfos ( ) { Set < PoolInfo > poolInfos = new HashSet < PoolInfo > ( ) ; for ( ResourceType type : types ) { poolInfos . addAll ( getPoolInfoMetrics ( type ) . keySet ( ) ) ; } List < PoolInfo > result = new ArrayList < PoolInfo > ( ) ; result . addAll ( poolInfos ) ; Collections . sort ( result ) ; return result ; } | Get a snapshot of the pool infos that is sorted . |
33,427 | public void submitMetrics ( MetricsRecord metricsRecord ) { List < PoolMetadata > poolMetadatas = getPoolMetadataList ( ) ; PoolFairnessCalculator . calculateFairness ( poolMetadatas , metricsRecord ) ; for ( SchedulerForType scheduler : schedulersForTypes . values ( ) ) { scheduler . submitMetrics ( ) ; } } | Submit the metrics . |
33,428 | static MD5Hash downloadImageToStorage ( String fsName , long imageTxId , FSImage fsImage , boolean needDigest ) throws IOException { return downloadImageToStorage ( fsName , imageTxId , fsImage , needDigest , false ) ; } | Download image to local storage with throttling . |
33,429 | static MD5Hash downloadImageToStorage ( String fsName , long imageTxId , FSImage dstImage , boolean needDigest , boolean disableThrottle ) throws IOException { String fileid = GetImageServlet . getParamStringForImage ( imageTxId , dstImage . storage , disableThrottle ) ; List < OutputStream > outputStreams = dstImage . getCheckpointImageOutputStreams ( imageTxId ) ; if ( outputStreams . size ( ) == 0 ) { throw new IOException ( "No targets in destination storage!" ) ; } MD5Hash hash = getFileClient ( fsName , fileid , outputStreams , dstImage . storage , needDigest ) ; LOG . info ( "Downloaded image files for txid: " + imageTxId ) ; return hash ; } | Download image to local storage . Throttling can be disabled . |
33,430 | static void uploadImageFromStorage ( String fsName , String machine , int port , NNStorage storage , long txid ) throws IOException { String fileid = GetImageServlet . getParamStringToPutImage ( txid , machine , port , storage ) ; LOG . info ( "Image upload: Posted URL " + fsName + fileid ) ; TransferFsImage . getFileClient ( fsName , fileid , null , storage , false ) ; LOG . info ( "Uploaded image with txid " + txid + " to namenode at " + fsName ) ; } | Requests that the NameNode download an image from this node . |
33,431 | public static void getFileServer ( OutputStream outstream , File localfile , DataTransferThrottler throttler ) throws IOException { byte buf [ ] = new byte [ BUFFER_SIZE ] ; InputStream infile = null ; try { infile = new BufferedInputStream ( new FileInputStream ( localfile ) , BUFFER_SIZE ) ; if ( InjectionHandler . falseCondition ( InjectionEvent . TRANSFERFSIMAGE_GETFILESERVER0 ) && localfile . getAbsolutePath ( ) . contains ( "secondary" ) ) { throw new IOException ( "If this exception is not caught by the " + "name-node fs image will be truncated." ) ; } if ( InjectionHandler . falseCondition ( InjectionEvent . TRANSFERFSIMAGE_GETFILESERVER1 ) && localfile . getAbsolutePath ( ) . contains ( "fsimage" ) ) { long len = localfile . length ( ) ; buf = new byte [ ( int ) Math . min ( len / 2 , BUFFER_SIZE ) ] ; infile . read ( buf ) ; } int num = 1 ; while ( num > 0 ) { num = infile . read ( buf ) ; if ( num <= 0 ) { break ; } if ( InjectionHandler . falseCondition ( InjectionEvent . TRANSFERFSIMAGE_GETFILESERVER2 ) ) { LOG . warn ( "SIMULATING A CORRUPT BYTE IN IMAGE TRANSFER!" ) ; buf [ 0 ] ++ ; } InjectionHandler . processEvent ( InjectionEvent . TRANSFERFSIMAGE_GETFILESERVER3 ) ; outstream . write ( buf , 0 , num ) ; if ( throttler != null ) { throttler . throttle ( num ) ; } } } finally { if ( infile != null ) { infile . close ( ) ; } } } | A server - side method to respond to a getfile http request Copies the contents of the local file into the output stream . |
33,432 | public static void getFileServerForPartialFiles ( OutputStream outstream , String filename , InputStream infile , DataTransferThrottler throttler , long startPosition , long lengthToSend ) throws IOException { byte buf [ ] = new byte [ BUFFER_SIZE ] ; try { int num = 1 ; while ( num > 0 ) { num = infile . read ( buf , 0 , Math . min ( BUFFER_SIZE , ( int ) Math . min ( lengthToSend , Integer . MAX_VALUE ) ) ) ; lengthToSend -= num ; if ( num <= 0 ) { break ; } try { outstream . write ( buf , 0 , num ) ; } catch ( Exception e ) { break ; } if ( throttler != null ) { throttler . throttle ( num ) ; } } if ( lengthToSend > 0 ) { LOG . warn ( "Could not serve requested number of bytes. Left with " + lengthToSend + " bytes for file: " + filename ) ; } } finally { if ( infile != null ) { infile . close ( ) ; } } } | A server - side method to respond to a getfile http request Copies the contents of the local file into the output stream starting at the given position sending lengthToSend bytes . |
33,433 | private static int getHttpTimeout ( Storage st ) { if ( ! ( st instanceof NNStorage ) ) return DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT ; NNStorage storage = ( NNStorage ) st ; if ( storage == null || storage . getConf ( ) == null ) { return DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT ; } return storage . getConf ( ) . getInt ( DFS_IMAGE_TRANSFER_TIMEOUT_KEY , DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT ) ; } | Get connection and read timeout . |
33,434 | private static int printProgress ( String str , long received , long advertisedSize , int lastPrinted ) { if ( advertisedSize == 0 ) return 0 ; int currentPercent = ( int ) ( ( received * 100 ) / advertisedSize ) ; if ( currentPercent != lastPrinted ) FLOG . info ( "Downloading: " + str + ", completed: " + currentPercent ) ; return currentPercent ; } | Print progress when downloading files . |
33,435 | public static String reverseDns ( InetAddress hostIp , String ns ) throws NamingException { String [ ] parts = hostIp . getHostAddress ( ) . split ( "\\." ) ; String reverseIP = parts [ 3 ] + "." + parts [ 2 ] + "." + parts [ 1 ] + "." + parts [ 0 ] + ".in-addr.arpa" ; DirContext ictx = new InitialDirContext ( ) ; Attributes attribute = ictx . getAttributes ( "dns://" + ( ( ns == null ) ? "" : ns ) + "/" + reverseIP , new String [ ] { "PTR" } ) ; ictx . close ( ) ; return attribute . get ( "PTR" ) . get ( ) . toString ( ) ; } | Returns the hostname associated with the specified IP address by the provided nameserver . |
33,436 | public static String [ ] getIPs ( String strInterface ) throws UnknownHostException { try { boolean toGetIpv4 = true ; if ( System . getProperty ( PREFER_IPV6_ADDRESS_PROPERTY ) != null ) { toGetIpv4 = "false" . equals ( System . getProperty ( PREFER_IPV6_ADDRESS_PROPERTY ) ) ; } NetworkInterface netIF = NetworkInterface . getByName ( strInterface ) ; if ( netIF == null ) { return new String [ ] { InetAddress . getLocalHost ( ) . getHostAddress ( ) } ; } else { Vector < String > ips = new Vector < String > ( ) ; Enumeration < InetAddress > e = netIF . getInetAddresses ( ) ; while ( e . hasMoreElements ( ) ) { InetAddress curr = e . nextElement ( ) ; if ( toGetIpv4 && ! ( curr instanceof Inet4Address ) ) { continue ; } ips . add ( curr . getHostAddress ( ) ) ; } return ips . toArray ( new String [ ] { } ) ; } } catch ( SocketException e ) { return new String [ ] { InetAddress . getLocalHost ( ) . getHostAddress ( ) } ; } } | Returns all the IPs associated with the provided interface if any in textual form . |
33,437 | public static String getDefaultIP ( String strInterface ) throws UnknownHostException { String [ ] ips = getIPs ( strInterface ) ; return ips [ 0 ] ; } | Returns the first available IP address associated with the provided network interface |
33,438 | public static String [ ] getHosts ( String strInterface , String nameserver ) throws UnknownHostException { String [ ] ips = getIPs ( strInterface ) ; Vector < String > hosts = new Vector < String > ( ) ; for ( int ctr = 0 ; ctr < ips . length ; ctr ++ ) try { hosts . add ( reverseDns ( InetAddress . getByName ( ips [ ctr ] ) , nameserver ) ) ; } catch ( Exception e ) { } if ( hosts . size ( ) == 0 ) return new String [ ] { InetAddress . getLocalHost ( ) . getCanonicalHostName ( ) } ; else return hosts . toArray ( new String [ ] { } ) ; } | Returns all the host names associated by the provided nameserver with the address bound to the specified network interface |
33,439 | private synchronized void refreshCachedData ( ) throws IOException { IOUtils . closeStream ( committedTxnId ) ; File currentDir = journalStorage . getSingularStorageDir ( ) . getCurrentDir ( ) ; this . lastPromisedEpoch = new PersistentLongFile ( new File ( currentDir , LAST_PROMISED_FILENAME ) , 0 ) ; this . lastWriterEpoch = new PersistentLongFile ( new File ( currentDir , LAST_WRITER_EPOCH ) , 0 ) ; this . committedTxnId = new BestEffortLongFile ( new File ( currentDir , COMMITTED_TXID_FILENAME ) , HdfsConstants . INVALID_TXID ) ; metrics . lastWriterEpoch . set ( lastWriterEpoch . get ( ) ) ; } | Reload any data that may have been cached . This is necessary when we first load the Journal but also after any formatting operation since the cached data is no longer relevant . |
33,440 | private synchronized void copyMetaFilesForUpgrade ( ) throws IOException { Configuration conf = new Configuration ( ) ; File currentDir = journalStorage . getSingularStorageDir ( ) . getCurrentDir ( ) ; File prevDir = journalStorage . getSingularStorageDir ( ) . getPreviousTmp ( ) ; FileSystem fs = FileSystem . getLocal ( conf ) . getRaw ( ) ; FileUtil . copy ( new File ( prevDir , LAST_PROMISED_FILENAME ) , fs , new File ( currentDir , LAST_PROMISED_FILENAME ) , false , conf ) ; FileUtil . copy ( new File ( prevDir , LAST_WRITER_EPOCH ) , fs , new File ( currentDir , LAST_WRITER_EPOCH ) , false , conf ) ; FileUtil . copy ( new File ( prevDir , COMMITTED_TXID_FILENAME ) , fs , new File ( currentDir , COMMITTED_TXID_FILENAME ) , false , conf ) ; FileUtil . copy ( new File ( prevDir , JNStorage . PAXOS_DIR ) , fs , new File ( currentDir , JNStorage . PAXOS_DIR ) , false , conf ) ; } | After an upgrade we must ensure that the current directory still holds all epoch committed txid and paxos files that it had before we did the upgrade . |
33,441 | public void rollbackImage ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getLayoutVersion ( ) != 0 , "can't rollback with uninitialized layout version: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Rolling back image " + this . getJournalId ( ) + " with namespace info: (" + nsInfo . toColonSeparatedString ( ) + ")" ) ; imageStorage . rollback ( nsInfo ) ; } | Rollback the local image storage with the given namespace . |
33,442 | private void doUpgradeImage ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Upgrading image " + this . getJournalId ( ) + " with namespace info: (" + nsInfo . toColonSeparatedString ( ) + ")" ) ; checkpointImageDigests . remove ( mostRecentCheckpointTxid ) ; imageStorage . doUpgrade ( nsInfo ) ; } | Upgrade the local image storage with the given namespace . |
33,443 | private void completeUpgradeImage ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Completing Upgrading image " + this . getJournalId ( ) + " with namespace info: (" + nsInfo . toColonSeparatedString ( ) + ")" ) ; imageStorage . completeUpgrade ( nsInfo ) ; } | Complete the upgrade for local image storage with the given namespace . |
33,444 | private void completeUpgradeJournal ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Completing Upgrading journal" + this . getJournalId ( ) + " with namespace info: (" + nsInfo . toColonSeparatedString ( ) + ")" ) ; journalStorage . completeUpgrade ( nsInfo ) ; } | Complete the upgrade for local journal storage with the given namespace . |
33,445 | public void rollbackJournal ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getLayoutVersion ( ) != 0 , "can't rollback with uninitialized layout version : %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Rolling back journal " + this . getJournalId ( ) + " with namespace info: (" + nsInfo . toColonSeparatedString ( ) + ")" ) ; journalStorage . rollback ( nsInfo ) ; refreshCachedData ( ) ; } | Rollback the local journal storage with the given namespace . |
33,446 | private void doUpgradeJournal ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Upgrading journal " + this . getJournalId ( ) + " with namespace info: (" + nsInfo . toColonSeparatedString ( ) + ")" ) ; journalStorage . doUpgrade ( nsInfo ) ; copyMetaFilesForUpgrade ( ) ; refreshCachedData ( ) ; } | Upgrade the local journal storage with the given namespace . |
33,447 | private void recoverJournal ( StartupOption startOpt ) throws IOException { LOG . info ( "Recovering journal " + this . getJournalId ( ) ) ; journalStorage . recover ( startOpt ) ; } | Recover the local journal storage . |
33,448 | private void recoverImage ( StartupOption startOpt ) throws IOException { LOG . info ( "Recovering image" + this . getJournalId ( ) ) ; imageStorage . recover ( startOpt ) ; } | Recover the local image storage . |
33,449 | public void close ( ) throws IOException { journalStorage . close ( ) ; imageStorage . close ( ) ; IOUtils . closeStream ( committedTxnId ) ; } | Unlock and release resources . |
33,450 | synchronized NewEpochResponseProto newEpoch ( NamespaceInfo nsInfo , long epoch ) throws IOException { checkJournalStorageFormatted ( ) ; journalStorage . checkConsistentNamespace ( nsInfo ) ; if ( imageStorage . isFormatted ( ) ) { imageStorage . checkConsistentNamespace ( nsInfo ) ; } if ( epoch <= getLastPromisedEpoch ( ) ) { throw new IOException ( "Proposed epoch " + epoch + " <= last promise " + getLastPromisedEpoch ( ) ) ; } updateLastPromisedEpoch ( epoch ) ; abortCurSegment ( ) ; NewEpochResponseProto ret = new NewEpochResponseProto ( ) ; EditLogFile latestFile = scanStorageForLatestEdits ( ) ; if ( latestFile != null ) { ret . setLastSegmentTxId ( latestFile . getFirstTxId ( ) ) ; } return ret ; } | Try to create a new epoch for this journal . |
33,451 | private synchronized void checkRequest ( RequestInfo reqInfo ) throws IOException { if ( reqInfo . getEpoch ( ) < lastPromisedEpoch . get ( ) ) { throw new IOException ( "IPC's epoch " + reqInfo . getEpoch ( ) + " is less than the last promised epoch " + lastPromisedEpoch . get ( ) ) ; } else if ( reqInfo . getEpoch ( ) > lastPromisedEpoch . get ( ) ) { updateLastPromisedEpoch ( reqInfo . getEpoch ( ) ) ; } if ( reqInfo . getIpcSerialNumber ( ) <= currentEpochIpcSerial ) { checkSync ( false , "IPC serial %s from client %s was not higher than prior highest " + "IPC serial %s" , reqInfo . getIpcSerialNumber ( ) , Server . getRemoteIp ( ) , currentEpochIpcSerial ) ; } currentEpochIpcSerial = reqInfo . getIpcSerialNumber ( ) ; if ( reqInfo . hasCommittedTxId ( ) ) { if ( reqInfo . getCommittedTxId ( ) < committedTxnId . get ( ) ) { throw new IllegalArgumentException ( "Client trying to move committed txid backward from " + committedTxnId . get ( ) + " to " + reqInfo . getCommittedTxId ( ) ) ; } boolean persist = ( now ( ) - lastPersistedCommittedTxId ) > 1000 ; if ( persist ) { lastPersistedCommittedTxId = now ( ) ; } committedTxnId . set ( reqInfo . getCommittedTxId ( ) , persist ) ; } } | Ensure that the given request is coming from the correct writer and in - order . |
33,452 | public synchronized void startLogSegment ( RequestInfo reqInfo , long txid ) throws IOException { assert fjm != null ; checkJournalStorageFormatted ( ) ; checkRequest ( reqInfo ) ; if ( curSegment != null ) { LOG . warn ( "Client is requesting a new log segment " + txid + " though we are already writing " + curSegment + ". " + "Aborting the current segment in order to begin the new one." ) ; abortCurSegment ( ) ; } EditLogFile existing = fjm . getLogFile ( txid ) ; if ( existing != null ) { if ( ! existing . isInProgress ( ) ) { throw new IllegalStateException ( "Already have a finalized segment " + existing + " beginning at " + txid ) ; } existing . validateLog ( ) ; if ( existing . getLastTxId ( ) != existing . getFirstTxId ( ) ) { throw new IllegalStateException ( "The log file " + existing + " seems to contain valid transactions" ) ; } } long curLastWriterEpoch = lastWriterEpoch . get ( ) ; if ( curLastWriterEpoch != reqInfo . getEpoch ( ) ) { LOG . info ( "Updating lastWriterEpoch from " + curLastWriterEpoch + " to " + reqInfo . getEpoch ( ) + " for client " + Server . getRemoteIp ( ) ) ; lastWriterEpoch . set ( reqInfo . getEpoch ( ) ) ; metrics . lastWriterEpoch . set ( reqInfo . getEpoch ( ) ) ; } purgePaxosDecision ( txid ) ; curSegment = fjm . startLogSegment ( txid ) ; curSegmentTxId = txid ; nextTxId = txid ; currentSegmentWrittenBytes = 5L ; if ( journalNode != null ) { journalNode . addSyncTask ( this , curSegmentTxId ) ; } } | Start a new segment at the given txid . The previous segment must have already been finalized . |
33,453 | public synchronized void finalizeLogSegment ( RequestInfo reqInfo , long startTxId , long endTxId ) throws IOException { checkJournalStorageFormatted ( ) ; checkRequest ( reqInfo ) ; boolean needsValidation = true ; if ( startTxId == curSegmentTxId ) { if ( curSegment != null ) { curSegment . close ( ) ; curSegment = null ; curSegmentTxId = HdfsConstants . INVALID_TXID ; currentSegmentWrittenBytes = 0L ; } checkSync ( nextTxId == endTxId + 1 , "Trying to finalize in-progress log segment %s to end at " + "txid %s but only written up to txid %s" , startTxId , endTxId , nextTxId - 1 ) ; needsValidation = false ; } FileJournalManager . EditLogFile elf = fjm . getLogFile ( startTxId ) ; if ( elf == null ) { throw new JournalOutOfSyncException ( "No log file to finalize at " + "transaction ID " + startTxId ) ; } if ( elf . isInProgress ( ) ) { if ( needsValidation ) { LOG . info ( "Validating log segment " + elf . getFile ( ) + " about to be " + "finalized" ) ; elf . validateLog ( ) ; checkSync ( elf . getLastTxId ( ) == endTxId , "Trying to finalize in-progress log segment %s to end at " + "txid %s but log %s on disk only contains up to txid %s" , startTxId , endTxId , elf . getFile ( ) , elf . getLastTxId ( ) ) ; } fjm . finalizeLogSegment ( startTxId , endTxId ) ; } else { Preconditions . checkArgument ( endTxId == elf . getLastTxId ( ) , "Trying to re-finalize already finalized log " + elf + " with different endTxId " + endTxId ) ; } purgePaxosDecision ( elf . getFirstTxId ( ) ) ; } | Finalize the log segment at the given transaction ID . |
33,454 | private void purgePaxosDecision ( long segmentTxId ) throws IOException { File paxosFile = journalStorage . getPaxosFile ( segmentTxId ) ; if ( paxosFile . exists ( ) ) { if ( ! paxosFile . delete ( ) ) { throw new IOException ( "Unable to delete paxos file " + paxosFile ) ; } } } | Remove the previously - recorded accepted recovery information for a given log segment once it is no longer necessary . |
33,455 | File syncLog ( RequestInfo reqInfo , final SegmentStateProto segment , final URL url ) throws IOException { long startTxId = segment . getStartTxId ( ) ; long epoch = reqInfo . getEpoch ( ) ; return syncLog ( epoch , segment . getStartTxId ( ) , url , segment . toString ( ) , journalStorage . getSyncLogTemporaryFile ( startTxId , epoch ) ) ; } | Synchronize a log segment from another JournalNode . The log is downloaded from the provided URL into a temporary location on disk which is named based on the current request's epoch .
33,456 | File syncLog ( long stamp , final long startTxId , final URL url , String name , File tmpFile ) throws IOException { final File [ ] localPaths = new File [ ] { tmpFile } ; LOG . info ( "Synchronizing log " + name + " from " + url ) ; boolean success = false ; try { TransferFsImage . doGetUrl ( url , ImageSet . convertFilesToStreams ( localPaths , journalStorage , url . toString ( ) ) , journalStorage , true ) ; assert tmpFile . exists ( ) ; success = true ; } finally { if ( ! success ) { if ( ! tmpFile . delete ( ) ) { LOG . warn ( "Failed to delete temporary file " + tmpFile ) ; } } } return tmpFile ; } | Synchronize a log segment from another JournalNode . The log is downloaded from the provided URL into a temporary location on disk |
33,457 | private void completeHalfDoneAcceptRecovery ( PersistedRecoveryPaxosData paxosData ) throws IOException { if ( paxosData == null ) { return ; } long segmentId = paxosData . getSegmentState ( ) . getStartTxId ( ) ; long epoch = paxosData . getAcceptedInEpoch ( ) ; File tmp = journalStorage . getSyncLogTemporaryFile ( segmentId , epoch ) ; if ( tmp . exists ( ) ) { File dst = journalStorage . getInProgressEditLog ( segmentId ) ; LOG . info ( "Rolling forward previously half-completed synchronization: " + tmp + " -> " + dst ) ; FileUtil . replaceFile ( tmp , dst ) ; } } | In the case the node crashes in between downloading a log segment and persisting the associated paxos recovery data the log segment will be left in its temporary location on disk . Given the paxos data we can check if this was indeed the case and "roll forward" the atomic operation .
33,458 | private PersistedRecoveryPaxosData getPersistedPaxosData ( long segmentTxId ) throws IOException { File f = journalStorage . getPaxosFile ( segmentTxId ) ; if ( ! f . exists ( ) ) { return null ; } InputStream in = new FileInputStream ( f ) ; try { PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData . parseDelimitedFrom ( in ) ; Preconditions . checkState ( ret != null && ret . getSegmentState ( ) . getStartTxId ( ) == segmentTxId , "Bad persisted data for segment %s: %s" , segmentTxId , ret ) ; return ret ; } finally { IOUtils . closeStream ( in ) ; } } | Retrieve the persisted data for recovering the given segment from disk . |
33,459 | private void persistPaxosData ( long segmentTxId , PersistedRecoveryPaxosData newData ) throws IOException { File f = journalStorage . getPaxosFile ( segmentTxId ) ; boolean success = false ; AtomicFileOutputStream fos = new AtomicFileOutputStream ( f ) ; try { newData . writeDelimitedTo ( fos ) ; fos . write ( '\n' ) ; OutputStreamWriter writer = new OutputStreamWriter ( fos , Charsets . UTF_8 ) ; writer . write ( String . valueOf ( newData ) ) ; writer . write ( '\n' ) ; writer . flush ( ) ; fos . flush ( ) ; success = true ; } finally { if ( success ) { IOUtils . closeStream ( fos ) ; } else { fos . abort ( ) ; } } } | Persist data for recovering the given segment from disk . |
33,460 | public void saveDigestAndRenameCheckpointImage ( long txid , MD5Hash digest ) throws IOException { MD5Hash storedDigest = checkpointImageDigests . get ( txid ) ; if ( storedDigest == null || ! storedDigest . equals ( digest ) ) { throw new IOException ( "Digest of data written: " + storedDigest + " does not match requested digest: " + digest + " for txid: " + txid + ", journal: " + journalId ) ; } imageManager . saveDigestAndRenameCheckpointImage ( txid , digest ) ; checkpointImageDigests . remove ( txid ) ; } | Verify the stored digest for the checkpoint image at the given txid and rename the image into place .
33,461 | public static final DataTransferThrottler getThrottler ( Configuration conf , boolean disableThrottler ) { if ( disableThrottler ) { return null ; } long transferBandwidth = conf . getLong ( HdfsConstants . DFS_IMAGE_TRANSFER_RATE_KEY , HdfsConstants . DFS_IMAGE_TRANSFER_RATE_DEFAULT ) ; DataTransferThrottler throttler = null ; if ( transferBandwidth > 0 ) { throttler = new DataTransferThrottler ( transferBandwidth ) ; } return throttler ; } | Construct a throttler from conf |
33,462 | public static void setVerificationHeaders ( HttpServletResponse response , File file ) throws IOException { response . setHeader ( TransferFsImage . CONTENT_LENGTH , String . valueOf ( file . length ( ) ) ) ; MD5Hash hash = MD5FileUtils . readStoredMd5ForFile ( file ) ; if ( hash != null ) { response . setHeader ( TransferFsImage . MD5_HEADER , hash . toString ( ) ) ; } } | Set headers for content length and if available md5 . |
33,463 | public boolean reserveSpaceWithCheckSum ( Path f , long size ) { RawInMemoryFileSystem mfs = ( RawInMemoryFileSystem ) getRawFileSystem ( ) ; synchronized ( mfs ) { boolean b = mfs . reserveSpace ( f , size ) ; if ( b ) { long checksumSize = getChecksumFileLength ( f , size ) ; b = mfs . reserveSpace ( getChecksumFile ( f ) , checksumSize ) ; if ( ! b ) { mfs . unreserveSpace ( f ) ; } } return b ; } } | Register a file with its size . This will also register a checksum for the file that the user is trying to create . This is required since none of the FileSystem APIs accept the size of the file as argument . But since it is required for us to apriori know the size of the file we are going to create the user must call this method for each file he wants to create and reserve memory for that file . We either succeed in reserving memory for both the main file and the checksum file and return true or return false . |
33,464 | public static void setTheManager ( ) { if ( "true" . equalsIgnoreCase ( System . getProperty ( ACTIVATE_FLAG ) ) ) { if ( ! ( System . getSecurityManager ( ) instanceof DnsMonitorSecurityManager ) ) { System . setSecurityManager ( theManager ) ; } } } | Updates the system's security manager to an instance of this class if the system property dns_call_stack_logging is set to true .
33,465 | static ZookeeperTxId checkZooKeeperBeforeFailover ( Configuration startupConf , Configuration confg , boolean noverification ) throws IOException { AvatarZooKeeperClient zk = null ; String fsname = startupConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; int maxTries = startupConf . getInt ( "dfs.avatarnode.zk.retries" , 3 ) ; Exception lastException = null ; for ( int i = 0 ; i < maxTries ; i ++ ) { try { zk = new AvatarZooKeeperClient ( confg , null , false ) ; LOG . info ( "Failover: Checking if the primary is empty" ) ; String zkRegistry = zk . getPrimaryAvatarAddress ( fsname , new Stat ( ) , false , i > 0 ) ; if ( zkRegistry != null ) { throw new IOException ( "Can't switch the AvatarNode to primary since " + "zookeeper record is not clean. Either use shutdownAvatar to kill " + "the current primary and clean the ZooKeeper entry, " + "or clear out the ZooKeeper entry if the primary is dead" ) ; } if ( noverification ) { return null ; } LOG . info ( "Failover: Obtaining last transaction id from ZK" ) ; String address = startupConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; long sessionId = zk . getPrimarySsId ( address , i > 0 ) ; ZookeeperTxId zkTxId = zk . getPrimaryLastTxId ( address , i > 0 ) ; if ( sessionId != zkTxId . getSessionId ( ) ) { throw new IOException ( "Session Id in the ssid node : " + sessionId + " does not match the session Id in the txid node : " + zkTxId . getSessionId ( ) ) ; } return zkTxId ; } catch ( Exception e ) { LOG . error ( "Got Exception reading primary node registration " + "from ZooKeeper. Will retry..." , e ) ; lastException = e ; } finally { shutdownZkClient ( zk ) ; } } throw new IOException ( lastException ) ; } | Verifies whether we are in a consistent state before we perform a failover |
33,466 | static long writeToZooKeeperAfterFailover ( Configuration startupConf , Configuration confg ) throws IOException { AvatarZooKeeperClient zk = null ; String address = startupConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; String realAddress = confg . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; int maxTries = startupConf . getInt ( "dfs.avatarnode.zk.retries" , 3 ) ; for ( int i = 0 ; i < maxTries ; i ++ ) { try { zk = new AvatarZooKeeperClient ( confg , null , false ) ; LOG . info ( "Failover: Registering to ZK as primary" ) ; final boolean toOverwrite = true ; zk . registerPrimary ( address , realAddress , toOverwrite ) ; registerClientProtocolAddress ( zk , startupConf , confg , toOverwrite ) ; registerDnProtocolAddress ( zk , startupConf , confg , toOverwrite ) ; registerHttpAddress ( zk , startupConf , confg , toOverwrite ) ; LOG . info ( "Failover: Writting session id to ZK" ) ; return writeSessionIdToZK ( startupConf , zk ) ; } catch ( Exception e ) { LOG . error ( "Got Exception registering the new primary " + "with ZooKeeper. Will retry..." , e ) ; } finally { shutdownZkClient ( zk ) ; } } throw new IOException ( "Cannot connect to zk" ) ; } | Performs some operations after failover such as writing a new session id and registering to zookeeper as the new primary . |
33,467 | static void writeLastTxidToZookeeper ( long lastTxid , long totalBlocks , long totalInodes , long ssid , Configuration startupConf , Configuration confg ) throws IOException { AvatarZooKeeperClient zk = null ; LOG . info ( "Writing lastTxId: " + lastTxid + ", total blocks: " + totalBlocks + ", total inodes: " + totalInodes ) ; if ( lastTxid < 0 ) { LOG . warn ( "Invalid last transaction id : " + lastTxid + " skipping write to zookeeper." ) ; return ; } ZookeeperTxId zkTxid = new ZookeeperTxId ( ssid , lastTxid , totalBlocks , totalInodes ) ; int maxTries = startupConf . getInt ( "dfs.avatarnode.zk.retries" , 3 ) ; for ( int i = 0 ; i < maxTries ; i ++ ) { try { zk = new AvatarZooKeeperClient ( confg , null , false ) ; zk . registerLastTxId ( startupConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) , zkTxid ) ; return ; } catch ( Exception e ) { LOG . error ( "Got Exception when syncing last txid to zk. Will retry..." , e ) ; } finally { shutdownZkClient ( zk ) ; } } throw new IOException ( "Cannot connect to zk" ) ; } | Writes the last transaction id of the primary avatarnode to zookeeper . |
33,468 | static String getPrimaryRegistration ( Configuration startupConf , Configuration conf , String fsname ) throws IOException { AvatarZooKeeperClient zk = null ; int maxTries = startupConf . getInt ( "dfs.avatarnode.zk.retries" , 3 ) ; for ( int i = 0 ; i < maxTries ; i ++ ) { try { zk = new AvatarZooKeeperClient ( conf , null , false ) ; String zkRegistry = zk . getPrimaryAvatarAddress ( fsname , new Stat ( ) , false ) ; return zkRegistry ; } catch ( Exception e ) { LOG . error ( "Got Exception when reading primary registration. Will retry..." , e ) ; } finally { shutdownZkClient ( zk ) ; } } throw new IOException ( "Cannot connect to zk" ) ; } | Obtain the registration of the primary from zk . |
33,469 | private static boolean registerClientProtocolAddress ( AvatarZooKeeperClient zk , Configuration originalConf , Configuration conf , boolean toOverwrite ) throws UnsupportedEncodingException , IOException { LOG . info ( "Updating Client Address information in ZooKeeper" ) ; InetSocketAddress addr = NameNode . getClientProtocolAddress ( conf ) ; if ( addr == null ) { LOG . error ( FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + " for primary service is not defined" ) ; return true ; } InetSocketAddress defaultAddr = NameNode . getClientProtocolAddress ( originalConf ) ; if ( defaultAddr == null ) { LOG . error ( FSConstants . DFS_NAMENODE_RPC_ADDRESS_KEY + " for default service is not defined" ) ; return true ; } registerSocketAddress ( zk , originalConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) , conf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) , toOverwrite ) ; registerAliases ( zk , conf , FSConstants . FS_NAMENODE_ALIASES , conf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) , toOverwrite ) ; return false ; } | Registers the namenode's address in zookeeper
33,470 | private static void registerDnProtocolAddress ( AvatarZooKeeperClient zk , Configuration originalConf , Configuration conf , boolean toOverwrite ) throws UnsupportedEncodingException , IOException { LOG . info ( "Updating Service Address information in ZooKeeper" ) ; registerSocketAddress ( zk , originalConf . get ( NameNode . DATANODE_PROTOCOL_ADDRESS ) , conf . get ( NameNode . DATANODE_PROTOCOL_ADDRESS ) , toOverwrite ) ; registerAliases ( zk , conf , FSConstants . DFS_NAMENODE_DN_ALIASES , conf . get ( NameNode . DATANODE_PROTOCOL_ADDRESS ) , toOverwrite ) ; } | Registers the datanode protocol address in the zookeeper |
33,471 | private static void registerHttpAddress ( AvatarZooKeeperClient zk , Configuration originalConf , Configuration conf , boolean toOverwrite ) throws UnsupportedEncodingException , IOException { LOG . info ( "Updating Http Address information in ZooKeeper" ) ; String addr = conf . get ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY ) ; String defaultAddr = originalConf . get ( FSConstants . DFS_NAMENODE_HTTP_ADDRESS_KEY ) ; registerSocketAddress ( zk , defaultAddr , addr , toOverwrite ) ; registerAliases ( zk , conf , FSConstants . DFS_HTTP_ALIASES , addr , toOverwrite ) ; } | Registers the http address of the namenode in the zookeeper |
33,472 | private synchronized void load ( ) { Map < String , HadoopServer > map = new TreeMap < String , HadoopServer > ( ) ; for ( File file : saveDir . listFiles ( ) ) { try { HadoopServer server = new HadoopServer ( file ) ; map . put ( server . getLocationName ( ) , server ) ; } catch ( Exception exn ) { System . err . println ( exn ) ; } } this . servers = map ; } | Load all available locations from the workspace configuration directory . |
33,473 | public synchronized void updateServer ( String originalName , HadoopServer server ) { if ( ! server . getLocationName ( ) . equals ( originalName ) ) { servers . remove ( originalName ) ; servers . put ( server . getLocationName ( ) , server ) ; } store ( ) ; fireListeners ( server , SERVER_STATE_CHANGED ) ; } | Update one Hadoop location |
33,474 | synchronized public boolean selectInputStreams ( Collection < EditLogInputStream > streams , long fromTxId , boolean inProgressOk , boolean validateInProgressSegments , int minRedundancy ) throws IOException { final PriorityQueue < EditLogInputStream > allStreams = new PriorityQueue < EditLogInputStream > ( 64 , EDIT_LOG_INPUT_STREAM_COMPARATOR ) ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) ) { LOG . info ( "Skipping jas " + jas + " since it's disabled" ) ; continue ; } try { jas . getManager ( ) . selectInputStreams ( allStreams , fromTxId , inProgressOk , validateInProgressSegments ) ; } catch ( IOException ioe ) { LOG . warn ( "Unable to determine input streams from " + jas . getManager ( ) + ". Skipping." , ioe ) ; } } return chainAndMakeRedundantStreams ( streams , allStreams , fromTxId , inProgressOk , minRedundancy ) ; } | Selects input streams . Returns true if each stream meets min redundancy false otherwise . |
33,475 | synchronized boolean hasUnfinalizedSegments ( long fromTxId ) { List < EditLogInputStream > streams = new ArrayList < EditLogInputStream > ( ) ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) ) { continue ; } try { jas . getManager ( ) . selectInputStreams ( streams , fromTxId , true , false ) ; for ( EditLogInputStream elis : streams ) { if ( elis . isInProgress ( ) ) { return true ; } } } catch ( IOException ioe ) { LOG . warn ( "Unable to determine input streams from " + jas . getManager ( ) + ". Skipping." , ioe ) ; } } return false ; } | Check if any journal manager has unfinalized segments . |
33,476 | private static boolean isLocalJournal ( JournalManager jm ) { if ( jm == null || ( ! ( jm instanceof FileJournalManager ) ) ) { return false ; } return NNStorage . isPreferred ( StorageLocationType . LOCAL , ( ( FileJournalManager ) jm ) . getStorageDirectory ( ) ) ; } | Check if the given journal is local . |
33,477 | private void disableAndReportErrorOnJournals ( List < JournalAndStream > badJournals , String status ) throws IOException { if ( badJournals == null || badJournals . isEmpty ( ) ) { if ( forceJournalCheck ) { forceJournalCheck = false ; checkJournals ( status ) ; } return ; } for ( JournalAndStream j : badJournals ) { LOG . error ( "Disabling journal " + j ) ; j . abort ( ) ; j . setDisabled ( true ) ; if ( j . journal instanceof FileJournalManager ) { FileJournalManager fjm = ( FileJournalManager ) j . journal ; storage . reportErrorsOnDirectory ( fjm . getStorageDirectory ( ) , image ) ; } if ( j . journal instanceof ImageManager ) { ImageManager im = ( ImageManager ) j . journal ; im . setImageDisabled ( true ) ; } } if ( image != null ) { image . updateImageMetrics ( ) ; } checkJournals ( status ) ; } | Called when some journals experience an error in some operation . |
33,478 | private void mapJournalsAndReportErrors ( JournalClosure closure , String status ) throws IOException { List < JournalAndStream > badJAS = null ; for ( JournalAndStream jas : journals ) { try { closure . apply ( jas ) ; } catch ( Throwable t ) { if ( badJAS == null ) badJAS = new LinkedList < JournalAndStream > ( ) ; LOG . error ( "Error: " + status + " failed for (journal " + jas + ")" , t ) ; badJAS . add ( jas ) ; } } disableAndReportErrorOnJournals ( badJAS , status ) ; } | Apply the given operation across all of the journal managers disabling any for which the closure throws an IOException . |
33,479 | private void mapJournalsAndReportErrorsParallel ( JournalClosure closure , String status ) throws IOException { List < Future < JournalAndStream > > jasResponeses = new ArrayList < Future < JournalAndStream > > ( journals . size ( ) ) ; for ( JournalAndStream jas : journals ) { jasResponeses . add ( executor . submit ( new JournalSetWorker ( jas , closure , status ) ) ) ; } List < JournalAndStream > badJAS = null ; for ( Future < JournalAndStream > future : jasResponeses ) { JournalAndStream jas = null ; try { jas = future . get ( ) ; } catch ( ExecutionException e ) { throw new IOException ( "This should never happen!!!" , e ) ; } catch ( InterruptedException e ) { throw new IOException ( "Interrupted whe performing journal operations" , e ) ; } if ( jas == null ) continue ; if ( badJAS == null ) badJAS = new LinkedList < JournalAndStream > ( ) ; badJAS . add ( jas ) ; } disableAndReportErrorOnJournals ( badJAS , status ) ; } | Apply the given operation across all of the journal managers disabling any for which the closure throws an IOException . Do it in parallel . |
33,480 | void updateJournalMetrics ( ) { if ( metrics == null ) { return ; } int failedJournals = 0 ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) ) { failedJournals ++ ; } } metrics . journalsFailed . set ( failedJournals ) ; } | Update the metric tracking the number of failed journals .
33,481 | protected int checkJournals ( String status ) throws IOException { boolean abort = false ; int journalsAvailable = 0 ; int nonLocalJournalsAvailable = 0 ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) && jas . isRequired ( ) ) { abort = true ; } else if ( jas . isResourceAvailable ( ) ) { journalsAvailable ++ ; if ( jas . isRemote ( ) || jas . isShared ( ) ) { nonLocalJournalsAvailable ++ ; } } } updateJournalMetrics ( ) ; if ( abort || journalsAvailable < minimumNumberOfJournals || nonLocalJournalsAvailable < minimumNumberOfNonLocalJournals ) { forceJournalCheck = true ; String message = status + " failed for too many journals, minimum: " + minimumNumberOfJournals + " current: " + journalsAvailable + ", non-local: " + minimumNumberOfNonLocalJournals + " current: " + nonLocalJournalsAvailable ; LOG . error ( message ) ; throw new IOException ( message ) ; } return journalsAvailable ; } | Checks if the number of journals available is not below minimum . Only invoked at errors . |
33,482 | public synchronized RemoteEditLogManifest getEditLogManifest ( long fromTxId ) { List < RemoteEditLog > allLogs = new ArrayList < RemoteEditLog > ( ) ; for ( JournalAndStream j : journals ) { JournalManager jm = j . getManager ( ) ; try { allLogs . addAll ( jm . getEditLogManifest ( fromTxId ) . getLogs ( ) ) ; } catch ( Throwable t ) { LOG . warn ( "Cannot list edit logs in " + jm , t ) ; } } ImmutableListMultimap < Long , RemoteEditLog > logsByStartTxId = Multimaps . index ( allLogs , RemoteEditLog . GET_START_TXID ) ; long curStartTxId = fromTxId ; List < RemoteEditLog > logs = new ArrayList < RemoteEditLog > ( ) ; while ( true ) { ImmutableList < RemoteEditLog > logGroup = logsByStartTxId . get ( curStartTxId ) ; if ( logGroup . isEmpty ( ) ) { SortedSet < Long > startTxIds = new TreeSet < Long > ( logsByStartTxId . keySet ( ) ) ; startTxIds = startTxIds . tailSet ( curStartTxId ) ; if ( startTxIds . isEmpty ( ) ) { break ; } else { if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Found gap in logs at " + curStartTxId + ": " + "not returning previous logs in manifest." ) ; } logs . clear ( ) ; curStartTxId = startTxIds . first ( ) ; continue ; } } RemoteEditLog bestLog = Collections . max ( logGroup ) ; logs . add ( bestLog ) ; curStartTxId = bestLog . getEndTxId ( ) + 1 ; if ( curStartTxId == 0 ) break ; } RemoteEditLogManifest ret = new RemoteEditLogManifest ( logs ) ; if ( LOG . isDebugEnabled ( ) ) { LOG . debug ( "Generated manifest for logs since " + fromTxId + ":" + ret ) ; } return ret ; } | Return a manifest of what edit logs are available . All available edit logs are returned starting from the transaction id passed including inprogress segments . |
33,483 | String getSyncTimes ( ) { StringBuilder buf = new StringBuilder ( ) ; for ( JournalAndStream jas : journals ) { if ( jas . isActive ( ) ) { buf . append ( jas . getCurrentStream ( ) . getTotalSyncTime ( ) ) ; buf . append ( " " ) ; } } return buf . toString ( ) ; } | Add sync times to the buffer . |
33,484 | public void transitionNonFileJournals ( StorageInfo nsInfo , boolean checkEmpty , Transition transition , StartupOption startOpt ) throws IOException { for ( JournalManager jm : getJournalManagers ( ) ) { if ( ! ( jm instanceof FileJournalManager ) ) { if ( checkEmpty && jm . hasSomeJournalData ( ) ) { LOG . warn ( "Journal " + jm + " is not empty." ) ; continue ; } LOG . info ( transition + ": " + jm ) ; jm . transitionJournal ( nsInfo , transition , startOpt ) ; } } } | Transition the non - file journals . |
33,485 | public static EditLogInputStream getInputStream ( JournalManager jm , long txid ) throws IOException { List < EditLogInputStream > streams = new ArrayList < EditLogInputStream > ( ) ; jm . selectInputStreams ( streams , txid , true , false ) ; if ( streams . size ( ) < 1 ) { throw new IOException ( "Cannot obtain stream for txid: " + txid ) ; } Collections . sort ( streams , JournalSet . EDIT_LOG_INPUT_STREAM_COMPARATOR ) ; if ( txid == HdfsConstants . INVALID_TXID ) { return streams . get ( 0 ) ; } for ( EditLogInputStream elis : streams ) { if ( elis . getFirstTxId ( ) == txid ) { return elis ; } } throw new IOException ( "Cannot obtain stream for txid: " + txid ) ; } | Get input stream from the given journal starting at txid . Does not perform validation of the streams . |
33,486 | public List < JournalManager > getNonFileJournalManagers ( ) { List < JournalManager > list = new ArrayList < JournalManager > ( ) ; for ( JournalManager jm : getJournalManagers ( ) ) { if ( ! ( jm instanceof FileJournalManager ) ) { list . add ( jm ) ; } } return list ; } | Return all non - file journal managers . |
33,487 | void attemptRestoreRemovedStorage ( ) { if ( removedStorageDirs . size ( ) == 0 ) return ; synchronized ( this . restorationLock ) { LOG . info ( "attemptRestoreRemovedStorage: check removed(failed) " + "storage. removedStorages size = " + removedStorageDirs . size ( ) ) ; for ( Iterator < StorageDirectory > it = this . removedStorageDirs . iterator ( ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; File root = sd . getRoot ( ) ; LOG . info ( "attemptRestoreRemovedStorage: currently disabled dir " + root . getAbsolutePath ( ) + "; type=" + sd . getStorageDirType ( ) + ";canwrite=" + root . canWrite ( ) ) ; try { if ( root . exists ( ) && root . canWrite ( ) ) { LOG . info ( "attemptRestoreRemovedStorage: restoring dir " + sd . getRoot ( ) . getAbsolutePath ( ) ) ; this . addStorageDir ( sd ) ; it . remove ( ) ; sd . lock ( ) ; } } catch ( IOException e ) { LOG . warn ( "attemptRestoreRemovedStorage: failed to restore " + sd . getRoot ( ) . getAbsolutePath ( ) , e ) ; } } } } | See if any of removed storages is writable again and can be returned into service . |
33,488 | StorageDirectory getStorageDirectory ( URI uri ) { try { uri = Util . fileAsURI ( new File ( uri ) ) ; Iterator < StorageDirectory > it = dirIterator ( ) ; for ( ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; if ( Util . fileAsURI ( sd . getRoot ( ) ) . equals ( uri ) ) { return sd ; } } } catch ( IOException ioe ) { LOG . warn ( "Error converting file to URI" , ioe ) ; } return null ; } | Return the storage directory corresponding to the passed URI |
33,489 | private static void checkSchemeConsistency ( URI u ) throws IOException { String scheme = u . getScheme ( ) ; if ( scheme == null ) { throw new IOException ( "Undefined scheme for " + u ) ; } } | Checks the consistency of a URI in particular if the scheme is specified |
33,490 | int getNumStorageDirs ( NameNodeDirType dirType ) { if ( dirType == null ) return getNumStorageDirs ( ) ; Iterator < StorageDirectory > it = dirIterator ( dirType ) ; int numDirs = 0 ; for ( ; it . hasNext ( ) ; it . next ( ) ) numDirs ++ ; return numDirs ; } | Return number of storage directories of the given type . |
33,491 | Collection < File > getDirectories ( NameNodeDirType dirType ) throws IOException { ArrayList < File > list = new ArrayList < File > ( ) ; Iterator < StorageDirectory > it = ( dirType == null ) ? dirIterator ( ) : dirIterator ( dirType ) ; for ( ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; list . add ( sd . getRoot ( ) ) ; } return list ; } | Return the list of locations being used for a specific purpose . i . e . Image or edit log storage . |
33,492 | static long readTransactionIdFile ( StorageDirectory sd ) throws IOException { File txidFile = getStorageFile ( sd , NameNodeFile . SEEN_TXID ) ; long txid = 0L ; if ( txidFile . exists ( ) && txidFile . canRead ( ) ) { BufferedReader br = new BufferedReader ( new FileReader ( txidFile ) ) ; try { txid = Long . valueOf ( br . readLine ( ) ) ; br . close ( ) ; br = null ; } finally { IOUtils . cleanup ( LOG , br ) ; } } return txid ; } | Determine the last transaction ID noted in this storage directory . This txid is stored in a special seen_txid file since it might not correspond to the latest image or edit log . For example an image - only directory will have this txid incremented when edits logs roll even though the edits logs are in a different directory . |
33,493 | void writeTransactionIdFile ( StorageDirectory sd , long txid ) throws IOException { if ( txid < - 1 ) { throw new IOException ( "Bad txid: " + txid ) ; } File txIdFile = getStorageFile ( sd , NameNodeFile . SEEN_TXID ) ; OutputStream fos = new AtomicFileOutputStream ( txIdFile ) ; try { fos . write ( String . valueOf ( txid ) . getBytes ( ) ) ; fos . write ( '\n' ) ; fos . close ( ) ; fos = null ; } finally { IOUtils . cleanup ( LOG , fos ) ; } } | Write the last seen transaction id into a separate file .
33,494 | public void writeTransactionIdFileToStorage ( long txid , FSImage image ) throws IOException { List < StorageDirectory > badSDs = new ArrayList < StorageDirectory > ( ) ; for ( StorageDirectory sd : storageDirs ) { try { writeTransactionIdFile ( sd , txid ) ; } catch ( IOException e ) { LOG . warn ( "writeTransactionIdToStorage failed on " + sd , e ) ; badSDs . add ( sd ) ; } } reportErrorsOnDirectories ( badSDs , image ) ; if ( image != null ) { } } | Write a small file in all available storage directories that indicates that the namespace has reached some given transaction ID . |
33,495 | public File [ ] getFsImageNameCheckpoint ( long txid ) { ArrayList < File > list = new ArrayList < File > ( ) ; for ( Iterator < StorageDirectory > it = dirIterator ( NameNodeDirType . IMAGE ) ; it . hasNext ( ) ; ) { list . add ( getStorageFile ( it . next ( ) , NameNodeFile . IMAGE_NEW , txid ) ) ; } return list . toArray ( new File [ list . size ( ) ] ) ; } | Return the name of the image file that is uploaded by periodic checkpointing |
33,496 | public File getFsImageName ( StorageLocationType type , long txid ) { File lastCandidate = null ; for ( Iterator < StorageDirectory > it = dirIterator ( NameNodeDirType . IMAGE ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; File fsImage = getStorageFile ( sd , NameNodeFile . IMAGE , txid ) ; if ( sd . getRoot ( ) . canRead ( ) && fsImage . exists ( ) ) { if ( isPreferred ( type , sd ) ) { return fsImage ; } lastCandidate = fsImage ; } } return lastCandidate ; } | Return the name of the image file preferring type images . Otherwise return any image . |
33,497 | public void format ( ) throws IOException { this . layoutVersion = FSConstants . LAYOUT_VERSION ; this . namespaceID = newNamespaceID ( ) ; this . cTime = 0L ; for ( Iterator < StorageDirectory > it = dirIterator ( ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; format ( sd ) ; } } | Format all available storage directories . |
33,498 | static int newNamespaceID ( ) { Random r = new Random ( ) ; r . setSeed ( FSNamesystem . now ( ) ) ; int newID = 0 ; while ( newID == 0 ) newID = r . nextInt ( 0x7FFFFFFF ) ; return newID ; } | Generate new namespaceID . |
33,499 | String getDeprecatedProperty ( String prop ) { assert getLayoutVersion ( ) > FSConstants . LAYOUT_VERSION : "getDeprecatedProperty should only be done when loading " + "storage from past versions during upgrade." ; return deprecatedProperties . get ( prop ) ; } | Return a property that was stored in an earlier version of HDFS . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.