idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
33,400 | private static double totalShareWithRatio ( Collection < ? extends Schedulable > schedulables , double weightToShareRatio ) { double totalShare = 0 ; for ( Schedulable schedulable : schedulables ) { totalShare += shareWithRatio ( schedulable , weightToShareRatio ) ; } return totalShare ; } | Compute the total share of the schedulables given a weightToShareRatio |
33,401 | private static double shareWithRatio ( Schedulable schedulable , double weightToShareRatio ) { double share = schedulable . getWeight ( ) * weightToShareRatio ; int min = schedulable . getMinimum ( ) ; int max = schedulable . getMaximum ( ) ; int requested = schedulable . getRequested ( ) ; share = Math . max ( min , s... | Get the share of the schedulable given a weightToShareRatio . This takes into account the min and the max allocation and is used to compute the weightToShareRatio globally |
33,402 | private static double assignShareIfUnderAllocated ( double totalShare , final Collection < ? extends Schedulable > schedulables ) { double totalMinDemand = 0 ; for ( Schedulable schedulable : schedulables ) { schedulable . share = 0 ; totalMinDemand += Math . min ( schedulable . getRequested ( ) , schedulable . getMini... | Assign fair share if total share < min demand . |
33,403 | private static TreeMap < Integer , Vector < Schedulable > > generatePriorityGroupedSchedulables ( final Collection < ? extends Schedulable > schedulables ) { TreeMap < Integer , Vector < Schedulable > > prioritizedSchedulableMap = new TreeMap < Integer , Vector < Schedulable > > ( ) ; for ( Schedulable schedulable : sc... | Group a collection of schedulables into priority groups sorted by priority . |
33,404 | private static void distributeSharePriority ( double total , final Collection < ? extends Schedulable > schedulables ) { double residualShare = assignShareIfUnderAllocated ( total , schedulables ) ; if ( residualShare <= 0.0 ) { return ; } TreeMap < Integer , Vector < Schedulable > > prioritizedSchedulableMap = generat... | Distribute the total share among the list of schedulables according to the PRIORITY model . Note that the eventual intent is to determine if preemption is necessary . So we need to assign the most ideal share on each schedulable beyond which preemption is necessary . Finds a way to distribute the share in such a way th... |
33,405 | private static void distributeShareMin ( Collection < ? extends Schedulable > schedulableVector ) { for ( Schedulable schedulable : schedulableVector ) { schedulable . share += schedulable . getMinimum ( ) ; } } | Increment each of the schedulables share by the minimum demand . Note that each pool has to be guaranteed at least its MIN value according to PRIORITY scheduling . |
33,406 | private static void distributeShareMax ( Collection < ? extends Schedulable > schedulableVector ) { for ( Schedulable schedulable : schedulableVector ) { double minShare = Math . max ( schedulable . getMinimum ( ) , schedulable . getRequested ( ) ) ; schedulable . share += Math . min ( schedulable . getMaximum ( ) , mi... | Increment each of the schedulables share by the lesser of its min demand or its maximum . |
33,407 | private int parseMonth ( String month ) { for ( int i = 0 ; i < months . length ; i ++ ) if ( months [ i ] . startsWith ( month ) ) return i ; return - 1 ; } | Convert the name of a month to the corresponding int value . |
33,408 | long writeFixedBlock ( FSDataInputStream [ ] inputs , int [ ] erasedLocations , int erasedLocationToFix , long limit , OutputStream out , Progressable reporter , ParallelStreamReader parallelReader , CRC32 crc ) throws IOException { LOG . info ( "Need to write " + limit + " bytes for erased location index " + erasedLoc... | Decode the inputs provided and write to the output . |
33,409 | public static Configuration mergeConfiguration ( URI uri , Configuration conf ) throws IOException { try { Long lastBadAccess = badURIs . get ( uri . getHost ( ) ) ; if ( lastBadAccess != null ) { if ( System . currentTimeMillis ( ) - lastBadAccess < BAD_URI_EXPIRY ) { return conf ; } else { badURIs . remove ( uri . ge... | Retrieves a modified version of the configuration from the supplied configuration . This is used to override the configuration supplied with the client side configuration retried from a different source . |
33,410 | public static JVMId forName ( String str ) throws IllegalArgumentException { if ( str == null ) return null ; try { String [ ] parts = str . split ( "_" ) ; if ( parts . length == 5 ) { if ( parts [ 0 ] . equals ( JVM ) ) { boolean isMap = false ; if ( parts [ 3 ] . equals ( "m" ) ) isMap = true ; else if ( parts [ 3 ]... | Construct a JVMId object from given string |
33,411 | private String [ ] readFile ( String fileName ) throws IOException { ArrayList < String > result = new ArrayList < String > ( ) ; FileReader fReader = new FileReader ( fileName ) ; BufferedReader bReader = new BufferedReader ( fReader ) ; while ( true ) { String line = bReader . readLine ( ) ; if ( line == null ) { bre... | Read a file line by line |
33,412 | protected String [ ] getPS ( ) { ShellCommandExecutor shellExecutor = new ShellCommandExecutor ( CMD ) ; try { shellExecutor . execute ( ) ; } catch ( IOException e ) { LOG . error ( StringUtils . stringifyException ( e ) ) ; return null ; } return shellExecutor . getOutput ( ) . split ( "\n" ) ; } | Execute ps - eo pid ppid pcpu rss command |
33,413 | private String [ ] [ ] parsePS ( String [ ] psStrings ) { String [ ] [ ] result = new String [ psStrings . length - 1 ] [ NUM_FIELDS ] ; for ( int i = 1 ; i < psStrings . length ; i ++ ) { Matcher matcher = psPattern . matcher ( psStrings [ i ] ) ; if ( matcher . find ( ) ) { for ( int j = 0 ; j < NUM_FIELDS ; j ++ ) {... | Parse PS results into fields |
33,414 | private double percentageToGHz ( double cpuUsage ) { cpuUsage /= 100 ; cpuUsage /= ttUtilization . getNumCpu ( ) ; cpuUsage *= ttUtilization . getCpuTotalGHz ( ) ; return cpuUsage ; } | ps - eo pcpu gives per core % . We convert it to GHz |
33,415 | private double [ ] getSubProcessUsage ( String pid , Map < String , String [ ] > pidToContent , Map < String , LinkedList < String > > pidToChildPid ) { double cpuMemUsage [ ] = new double [ 2 ] ; Queue < String > pidQueue = new LinkedList < String > ( ) ; pidQueue . add ( pid ) ; while ( ! pidQueue . isEmpty ( ) ) { p... | A function computes the Memory and CPU usage of all subprocess |
33,416 | void clear ( ) { for ( int i = 0 ; i < LEVEL ; i ++ ) { priorityQueues . get ( i ) . clear ( ) ; } raidQueue . clear ( ) ; } | Empty the queues . |
33,417 | synchronized int size ( int priority ) { if ( priority < 0 || priority >= LEVEL ) { throw new IllegalArgumentException ( "Unsupported priority: " + priority ) ; } return priorityQueues . get ( priority ) . size ( ) ; } | Return the number of under replication blocks of priority |
33,418 | synchronized int getSize ( int priority ) { int size = 0 ; for ( int i = priority ; i < LEVEL ; i ++ ) { size += priorityQueues . get ( i ) . size ( ) ; } return size ; } | get the number of under replicated blocks with equal or higher priority |
33,419 | public void removeNode ( ClusterNode node ) { String host = node . getHost ( ) ; NodeContainer container = hostToRunnableNode . get ( host ) ; if ( container != null ) { if ( container . removeNode ( node ) ) { runnableNodeCount -- ; } if ( container . isEmpty ( ) ) { hostToRunnableNode . remove ( host ) ; } } Node rac... | Remove a node from the snapshot . |
33,420 | void visit ( ImageElement element , int value ) throws IOException { visit ( element , Integer . toString ( value ) ) ; } | Convenience methods to automatically convert numeric value types to strings |
33,421 | void visitEnclosingElement ( ImageElement element , ImageElement key , int value ) throws IOException { visitEnclosingElement ( element , key , Integer . toString ( value ) ) ; } | Convenience methods to automatically convert value types to strings |
33,422 | public void addSession ( String id , Session session ) { for ( SchedulerForType scheduleThread : schedulersForTypes . values ( ) ) { scheduleThread . addSession ( id , session ) ; } } | Add a session for scheduling . |
33,423 | public void start ( ) { for ( Thread schedulerForType : schedulersForTypes . values ( ) ) { LOG . info ( "Starting " + schedulerForType . getName ( ) ) ; schedulerForType . start ( ) ; } configManager . start ( ) ; } | Start the scheduling as well as the reloadable configuration manager . |
33,424 | public void close ( ) { for ( SchedulerForType scheduleThread : schedulersForTypes . values ( ) ) { scheduleThread . close ( ) ; } for ( Thread scheduleThread : schedulersForTypes . values ( ) ) { Utilities . waitThreadTermination ( scheduleThread ) ; } configManager . close ( ) ; } | Stop the scheduling and disable the reloadable configuration manager . |
33,425 | public Map < PoolInfo , PoolInfoMetrics > getPoolInfoMetrics ( ResourceType type ) { return schedulersForTypes . get ( type ) . getPoolInfoMetrics ( ) ; } | Get an unmodifiable snapshot of the mapping from pool info to associated metrics . |
33,426 | public List < PoolInfo > getPoolInfos ( ) { Set < PoolInfo > poolInfos = new HashSet < PoolInfo > ( ) ; for ( ResourceType type : types ) { poolInfos . addAll ( getPoolInfoMetrics ( type ) . keySet ( ) ) ; } List < PoolInfo > result = new ArrayList < PoolInfo > ( ) ; result . addAll ( poolInfos ) ; Collections . sort (... | Get a snapshot of the pool infos that is sorted . |
33,427 | public void submitMetrics ( MetricsRecord metricsRecord ) { List < PoolMetadata > poolMetadatas = getPoolMetadataList ( ) ; PoolFairnessCalculator . calculateFairness ( poolMetadatas , metricsRecord ) ; for ( SchedulerForType scheduler : schedulersForTypes . values ( ) ) { scheduler . submitMetrics ( ) ; } } | Submit the metrics . |
33,428 | static MD5Hash downloadImageToStorage ( String fsName , long imageTxId , FSImage fsImage , boolean needDigest ) throws IOException { return downloadImageToStorage ( fsName , imageTxId , fsImage , needDigest , false ) ; } | Download image to local storage with throttling . |
33,429 | static MD5Hash downloadImageToStorage ( String fsName , long imageTxId , FSImage dstImage , boolean needDigest , boolean disableThrottle ) throws IOException { String fileid = GetImageServlet . getParamStringForImage ( imageTxId , dstImage . storage , disableThrottle ) ; List < OutputStream > outputStreams = dstImage .... | Download image to local storage . Throttling can be disabled . |
33,430 | static void uploadImageFromStorage ( String fsName , String machine , int port , NNStorage storage , long txid ) throws IOException { String fileid = GetImageServlet . getParamStringToPutImage ( txid , machine , port , storage ) ; LOG . info ( "Image upload: Posted URL " + fsName + fileid ) ; TransferFsImage . getFileC... | Requests that the NameNode download an image from this node . |
33,431 | public static void getFileServer ( OutputStream outstream , File localfile , DataTransferThrottler throttler ) throws IOException { byte buf [ ] = new byte [ BUFFER_SIZE ] ; InputStream infile = null ; try { infile = new BufferedInputStream ( new FileInputStream ( localfile ) , BUFFER_SIZE ) ; if ( InjectionHandler . f... | A server - side method to respond to a getfile http request Copies the contents of the local file into the output stream . |
33,432 | public static void getFileServerForPartialFiles ( OutputStream outstream , String filename , InputStream infile , DataTransferThrottler throttler , long startPosition , long lengthToSend ) throws IOException { byte buf [ ] = new byte [ BUFFER_SIZE ] ; try { int num = 1 ; while ( num > 0 ) { num = infile . read ( buf , ... | A server - side method to respond to a getfile http request Copies the contents of the local file into the output stream starting at the given position sending lengthToSend bytes . |
33,433 | private static int getHttpTimeout ( Storage st ) { if ( ! ( st instanceof NNStorage ) ) return DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT ; NNStorage storage = ( NNStorage ) st ; if ( storage == null || storage . getConf ( ) == null ) { return DFS_IMAGE_TRANSFER_TIMEOUT_DEFAULT ; } return storage . getConf ( ) . getInt ( DFS_I... | Get connection and read timeout . |
33,434 | private static int printProgress ( String str , long received , long advertisedSize , int lastPrinted ) { if ( advertisedSize == 0 ) return 0 ; int currentPercent = ( int ) ( ( received * 100 ) / advertisedSize ) ; if ( currentPercent != lastPrinted ) FLOG . info ( "Downloading: " + str + ", completed: " + currentPerce... | Print progress when downloading files . |
33,435 | public static String reverseDns ( InetAddress hostIp , String ns ) throws NamingException { String [ ] parts = hostIp . getHostAddress ( ) . split ( "\\." ) ; String reverseIP = parts [ 3 ] + "." + parts [ 2 ] + "." + parts [ 1 ] + "." + parts [ 0 ] + ".in-addr.arpa" ; DirContext ictx = new InitialDirContext ( ) ; Attr... | Returns the hostname associated with the specified IP address by the provided nameserver . |
33,436 | public static String [ ] getIPs ( String strInterface ) throws UnknownHostException { try { boolean toGetIpv4 = true ; if ( System . getProperty ( PREFER_IPV6_ADDRESS_PROPERTY ) != null ) { toGetIpv4 = "false" . equals ( System . getProperty ( PREFER_IPV6_ADDRESS_PROPERTY ) ) ; } NetworkInterface netIF = NetworkInterfa... | Returns all the IPs associated with the provided interface if any in textual form . |
33,437 | public static String getDefaultIP ( String strInterface ) throws UnknownHostException { String [ ] ips = getIPs ( strInterface ) ; return ips [ 0 ] ; } | Returns the first available IP address associated with the provided network interface |
33,438 | public static String [ ] getHosts ( String strInterface , String nameserver ) throws UnknownHostException { String [ ] ips = getIPs ( strInterface ) ; Vector < String > hosts = new Vector < String > ( ) ; for ( int ctr = 0 ; ctr < ips . length ; ctr ++ ) try { hosts . add ( reverseDns ( InetAddress . getByName ( ips [ ... | Returns all the host names associated by the provided nameserver with the address bound to the specified network interface |
33,439 | private synchronized void refreshCachedData ( ) throws IOException { IOUtils . closeStream ( committedTxnId ) ; File currentDir = journalStorage . getSingularStorageDir ( ) . getCurrentDir ( ) ; this . lastPromisedEpoch = new PersistentLongFile ( new File ( currentDir , LAST_PROMISED_FILENAME ) , 0 ) ; this . lastWrite... | Reload any data that may have been cached . This is necessary when we first load the Journal but also after any formatting operation since the cached data is no longer relevant . |
33,440 | private synchronized void copyMetaFilesForUpgrade ( ) throws IOException { Configuration conf = new Configuration ( ) ; File currentDir = journalStorage . getSingularStorageDir ( ) . getCurrentDir ( ) ; File prevDir = journalStorage . getSingularStorageDir ( ) . getPreviousTmp ( ) ; FileSystem fs = FileSystem . getLoca... | After an upgrade we must ensure that the current directory still holds all epoch committed txid and paxos files that it had before we did the upgrade . |
33,441 | public void rollbackImage ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getLayoutVersion ( ) != 0 , "can't rollback with uninitialized layout version: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Rolling back image " + this . getJournalId ( ) + " with namespace info:... | Rollback the local image storage with the given namespace . |
33,442 | private void doUpgradeImage ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Upgrading image " + this . getJournalId ( ) + " with namespace info: (" ... | Upgrade the local image storage with the given namespace . |
33,443 | private void completeUpgradeImage ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Completing Upgrading image " + this . getJournalId ( ) + " with na... | Complete the upgrade for local image storage with the given namespace . |
33,444 | private void completeUpgradeJournal ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Completing Upgrading journal" + this . getJournalId ( ) + " with... | Complete the upgrade for local journal storage with the given namespace . |
33,445 | public void rollbackJournal ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getLayoutVersion ( ) != 0 , "can't rollback with uninitialized layout version : %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Rolling back journal " + this . getJournalId ( ) + " with namespace ... | Rollback the local journal storage with the given namespace . |
33,446 | private void doUpgradeJournal ( NamespaceInfo nsInfo ) throws IOException { Preconditions . checkState ( nsInfo . getNamespaceID ( ) != 0 , "can't upgrade with uninitialized namespace info: %s" , nsInfo . toColonSeparatedString ( ) ) ; LOG . info ( "Upgrading journal " + this . getJournalId ( ) + " with namespace info:... | Upgrade the local journal storage with the given namespace . |
33,447 | private void recoverJournal ( StartupOption startOpt ) throws IOException { LOG . info ( "Recovering journal " + this . getJournalId ( ) ) ; journalStorage . recover ( startOpt ) ; } | Recover the local journal storage . |
33,448 | private void recoverImage ( StartupOption startOpt ) throws IOException { LOG . info ( "Recovering image" + this . getJournalId ( ) ) ; imageStorage . recover ( startOpt ) ; } | Recover the local image storage . |
33,449 | public void close ( ) throws IOException { journalStorage . close ( ) ; imageStorage . close ( ) ; IOUtils . closeStream ( committedTxnId ) ; } | Unlock and release resources . |
33,450 | synchronized NewEpochResponseProto newEpoch ( NamespaceInfo nsInfo , long epoch ) throws IOException { checkJournalStorageFormatted ( ) ; journalStorage . checkConsistentNamespace ( nsInfo ) ; if ( imageStorage . isFormatted ( ) ) { imageStorage . checkConsistentNamespace ( nsInfo ) ; } if ( epoch <= getLastPromisedEpo... | Try to create a new epoch for this journal . |
33,451 | private synchronized void checkRequest ( RequestInfo reqInfo ) throws IOException { if ( reqInfo . getEpoch ( ) < lastPromisedEpoch . get ( ) ) { throw new IOException ( "IPC's epoch " + reqInfo . getEpoch ( ) + " is less than the last promised epoch " + lastPromisedEpoch . get ( ) ) ; } else if ( reqInfo . getEpoch ( ... | Ensure that the given request is coming from the correct writer and in - order . |
33,452 | public synchronized void startLogSegment ( RequestInfo reqInfo , long txid ) throws IOException { assert fjm != null ; checkJournalStorageFormatted ( ) ; checkRequest ( reqInfo ) ; if ( curSegment != null ) { LOG . warn ( "Client is requesting a new log segment " + txid + " though we are already writing " + curSegment ... | Start a new segment at the given txid . The previous segment must have already been finalized . |
33,453 | public synchronized void finalizeLogSegment ( RequestInfo reqInfo , long startTxId , long endTxId ) throws IOException { checkJournalStorageFormatted ( ) ; checkRequest ( reqInfo ) ; boolean needsValidation = true ; if ( startTxId == curSegmentTxId ) { if ( curSegment != null ) { curSegment . close ( ) ; curSegment = n... | Finalize the log segment at the given transaction ID . |
33,454 | private void purgePaxosDecision ( long segmentTxId ) throws IOException { File paxosFile = journalStorage . getPaxosFile ( segmentTxId ) ; if ( paxosFile . exists ( ) ) { if ( ! paxosFile . delete ( ) ) { throw new IOException ( "Unable to delete paxos file " + paxosFile ) ; } } } | Remove the previously - recorded accepted recovery information for a given log segment once it is no longer necessary . |
33,455 | File syncLog ( RequestInfo reqInfo , final SegmentStateProto segment , final URL url ) throws IOException { long startTxId = segment . getStartTxId ( ) ; long epoch = reqInfo . getEpoch ( ) ; return syncLog ( epoch , segment . getStartTxId ( ) , url , segment . toString ( ) , journalStorage . getSyncLogTemporaryFile ( ... | Synchronize a log segment from another JournalNode . The log is downloaded from the provided URL into a temporary location on disk which is named based on the current request s epoch . |
33,456 | File syncLog ( long stamp , final long startTxId , final URL url , String name , File tmpFile ) throws IOException { final File [ ] localPaths = new File [ ] { tmpFile } ; LOG . info ( "Synchronizing log " + name + " from " + url ) ; boolean success = false ; try { TransferFsImage . doGetUrl ( url , ImageSet . convertF... | Synchronize a log segment from another JournalNode . The log is downloaded from the provided URL into a temporary location on disk |
33,457 | private void completeHalfDoneAcceptRecovery ( PersistedRecoveryPaxosData paxosData ) throws IOException { if ( paxosData == null ) { return ; } long segmentId = paxosData . getSegmentState ( ) . getStartTxId ( ) ; long epoch = paxosData . getAcceptedInEpoch ( ) ; File tmp = journalStorage . getSyncLogTemporaryFile ( se... | In the case the node crashes in between downloading a log segment and persisting the associated paxos recovery data the log segment will be left in its temporary location on disk . Given the paxos data we can check if this was indeed the case and " ; roll forward" ; the atomic operation . |
33,458 | private PersistedRecoveryPaxosData getPersistedPaxosData ( long segmentTxId ) throws IOException { File f = journalStorage . getPaxosFile ( segmentTxId ) ; if ( ! f . exists ( ) ) { return null ; } InputStream in = new FileInputStream ( f ) ; try { PersistedRecoveryPaxosData ret = PersistedRecoveryPaxosData . parseDeli... | Retrieve the persisted data for recovering the given segment from disk . |
33,459 | private void persistPaxosData ( long segmentTxId , PersistedRecoveryPaxosData newData ) throws IOException { File f = journalStorage . getPaxosFile ( segmentTxId ) ; boolean success = false ; AtomicFileOutputStream fos = new AtomicFileOutputStream ( f ) ; try { newData . writeDelimitedTo ( fos ) ; fos . write ( '\n' ) ... | Persist data for recovering the given segment from disk . |
33,460 | public void saveDigestAndRenameCheckpointImage ( long txid , MD5Hash digest ) throws IOException { MD5Hash storedDigest = checkpointImageDigests . get ( txid ) ; if ( storedDigest == null || ! storedDigest . equals ( digest ) ) { throw new IOException ( "Digest of data written: " + storedDigest + " does not match reque... | Roll the image . |
33,461 | public static final DataTransferThrottler getThrottler ( Configuration conf , boolean disableThrottler ) { if ( disableThrottler ) { return null ; } long transferBandwidth = conf . getLong ( HdfsConstants . DFS_IMAGE_TRANSFER_RATE_KEY , HdfsConstants . DFS_IMAGE_TRANSFER_RATE_DEFAULT ) ; DataTransferThrottler throttler... | Construct a throttler from conf |
33,462 | public static void setVerificationHeaders ( HttpServletResponse response , File file ) throws IOException { response . setHeader ( TransferFsImage . CONTENT_LENGTH , String . valueOf ( file . length ( ) ) ) ; MD5Hash hash = MD5FileUtils . readStoredMd5ForFile ( file ) ; if ( hash != null ) { response . setHeader ( Tran... | Set headers for content length and if available md5 . |
33,463 | public boolean reserveSpaceWithCheckSum ( Path f , long size ) { RawInMemoryFileSystem mfs = ( RawInMemoryFileSystem ) getRawFileSystem ( ) ; synchronized ( mfs ) { boolean b = mfs . reserveSpace ( f , size ) ; if ( b ) { long checksumSize = getChecksumFileLength ( f , size ) ; b = mfs . reserveSpace ( getChecksumFile ... | Register a file with its size . This will also register a checksum for the file that the user is trying to create . This is required since none of the FileSystem APIs accept the size of the file as argument . But since it is required for us to apriori know the size of the file we are going to create the user must call ... |
33,464 | public static void setTheManager ( ) { if ( "true" . equalsIgnoreCase ( System . getProperty ( ACTIVATE_FLAG ) ) ) { if ( ! ( System . getSecurityManager ( ) instanceof DnsMonitorSecurityManager ) ) { System . setSecurityManager ( theManager ) ; } } } | Update s the system s security manager to an instance of this class if system property dns_call_stack_logging is set to true . |
33,465 | static ZookeeperTxId checkZooKeeperBeforeFailover ( Configuration startupConf , Configuration confg , boolean noverification ) throws IOException { AvatarZooKeeperClient zk = null ; String fsname = startupConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; int maxTries = startupConf . getInt ( "dfs.avatarnode.zk.r... | Verifies whether we are in a consistent state before we perform a failover |
33,466 | static long writeToZooKeeperAfterFailover ( Configuration startupConf , Configuration confg ) throws IOException { AvatarZooKeeperClient zk = null ; String address = startupConf . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; String realAddress = confg . get ( NameNode . DFS_NAMENODE_RPC_ADDRESS_KEY ) ; int maxTrie... | Performs some operations after failover such as writing a new session id and registering to zookeeper as the new primary . |
33,467 | static void writeLastTxidToZookeeper ( long lastTxid , long totalBlocks , long totalInodes , long ssid , Configuration startupConf , Configuration confg ) throws IOException { AvatarZooKeeperClient zk = null ; LOG . info ( "Writing lastTxId: " + lastTxid + ", total blocks: " + totalBlocks + ", total inodes: " + totalIn... | Writes the last transaction id of the primary avatarnode to zookeeper . |
33,468 | static String getPrimaryRegistration ( Configuration startupConf , Configuration conf , String fsname ) throws IOException { AvatarZooKeeperClient zk = null ; int maxTries = startupConf . getInt ( "dfs.avatarnode.zk.retries" , 3 ) ; for ( int i = 0 ; i < maxTries ; i ++ ) { try { zk = new AvatarZooKeeperClient ( conf ,... | Obtain the registration of the primary from zk . |
33,469 | private static boolean registerClientProtocolAddress ( AvatarZooKeeperClient zk , Configuration originalConf , Configuration conf , boolean toOverwrite ) throws UnsupportedEncodingException , IOException { LOG . info ( "Updating Client Address information in ZooKeeper" ) ; InetSocketAddress addr = NameNode . getClientP... | Registers namenode s address in zookeeper |
33,470 | private static void registerDnProtocolAddress ( AvatarZooKeeperClient zk , Configuration originalConf , Configuration conf , boolean toOverwrite ) throws UnsupportedEncodingException , IOException { LOG . info ( "Updating Service Address information in ZooKeeper" ) ; registerSocketAddress ( zk , originalConf . get ( Na... | Registers the datanode protocol address in the zookeeper |
33,471 | private static void registerHttpAddress ( AvatarZooKeeperClient zk , Configuration originalConf , Configuration conf , boolean toOverwrite ) throws UnsupportedEncodingException , IOException { LOG . info ( "Updating Http Address information in ZooKeeper" ) ; String addr = conf . get ( FSConstants . DFS_NAMENODE_HTTP_AD... | Registers the http address of the namenode in the zookeeper |
33,472 | private synchronized void load ( ) { Map < String , HadoopServer > map = new TreeMap < String , HadoopServer > ( ) ; for ( File file : saveDir . listFiles ( ) ) { try { HadoopServer server = new HadoopServer ( file ) ; map . put ( server . getLocationName ( ) , server ) ; } catch ( Exception exn ) { System . err . prin... | Load all available locations from the workspace configuration directory . |
33,473 | public synchronized void updateServer ( String originalName , HadoopServer server ) { if ( ! server . getLocationName ( ) . equals ( originalName ) ) { servers . remove ( originalName ) ; servers . put ( server . getLocationName ( ) , server ) ; } store ( ) ; fireListeners ( server , SERVER_STATE_CHANGED ) ; } | Update one Hadoop location |
33,474 | synchronized public boolean selectInputStreams ( Collection < EditLogInputStream > streams , long fromTxId , boolean inProgressOk , boolean validateInProgressSegments , int minRedundancy ) throws IOException { final PriorityQueue < EditLogInputStream > allStreams = new PriorityQueue < EditLogInputStream > ( 64 , EDIT_L... | Selects input streams . Returns true if each stream meets min redundancy false otherwise . |
33,475 | synchronized boolean hasUnfinalizedSegments ( long fromTxId ) { List < EditLogInputStream > streams = new ArrayList < EditLogInputStream > ( ) ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) ) { continue ; } try { jas . getManager ( ) . selectInputStreams ( streams , fromTxId , true , false ) ; fo... | Check if any journal manager has unfinalized segments . |
33,476 | private static boolean isLocalJournal ( JournalManager jm ) { if ( jm == null || ( ! ( jm instanceof FileJournalManager ) ) ) { return false ; } return NNStorage . isPreferred ( StorageLocationType . LOCAL , ( ( FileJournalManager ) jm ) . getStorageDirectory ( ) ) ; } | Check if the given journal is local . |
33,477 | private void disableAndReportErrorOnJournals ( List < JournalAndStream > badJournals , String status ) throws IOException { if ( badJournals == null || badJournals . isEmpty ( ) ) { if ( forceJournalCheck ) { forceJournalCheck = false ; checkJournals ( status ) ; } return ; } for ( JournalAndStream j : badJournals ) { ... | Called when some journals experience an error in some operation . |
33,478 | private void mapJournalsAndReportErrors ( JournalClosure closure , String status ) throws IOException { List < JournalAndStream > badJAS = null ; for ( JournalAndStream jas : journals ) { try { closure . apply ( jas ) ; } catch ( Throwable t ) { if ( badJAS == null ) badJAS = new LinkedList < JournalAndStream > ( ) ; L... | Apply the given operation across all of the journal managers disabling any for which the closure throws an IOException . |
33,479 | private void mapJournalsAndReportErrorsParallel ( JournalClosure closure , String status ) throws IOException { List < Future < JournalAndStream > > jasResponeses = new ArrayList < Future < JournalAndStream > > ( journals . size ( ) ) ; for ( JournalAndStream jas : journals ) { jasResponeses . add ( executor . submit (... | Apply the given operation across all of the journal managers disabling any for which the closure throws an IOException . Do it in parallel . |
33,480 | void updateJournalMetrics ( ) { if ( metrics == null ) { return ; } int failedJournals = 0 ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) ) { failedJournals ++ ; } } metrics . journalsFailed . set ( failedJournals ) ; } | Get the number of available journals . |
33,481 | protected int checkJournals ( String status ) throws IOException { boolean abort = false ; int journalsAvailable = 0 ; int nonLocalJournalsAvailable = 0 ; for ( JournalAndStream jas : journals ) { if ( jas . isDisabled ( ) && jas . isRequired ( ) ) { abort = true ; } else if ( jas . isResourceAvailable ( ) ) { journals... | Checks if the number of journals available is not below minimum . Only invoked at errors . |
33,482 | public synchronized RemoteEditLogManifest getEditLogManifest ( long fromTxId ) { List < RemoteEditLog > allLogs = new ArrayList < RemoteEditLog > ( ) ; for ( JournalAndStream j : journals ) { JournalManager jm = j . getManager ( ) ; try { allLogs . addAll ( jm . getEditLogManifest ( fromTxId ) . getLogs ( ) ) ; } catch... | Return a manifest of what edit logs are available . All available edit logs are returned starting from the transaction id passed including inprogress segments . |
33,483 | String getSyncTimes ( ) { StringBuilder buf = new StringBuilder ( ) ; for ( JournalAndStream jas : journals ) { if ( jas . isActive ( ) ) { buf . append ( jas . getCurrentStream ( ) . getTotalSyncTime ( ) ) ; buf . append ( " " ) ; } } return buf . toString ( ) ; } | Return a space - separated string of the total sync times of all active journal streams .
33,484 | public void transitionNonFileJournals ( StorageInfo nsInfo , boolean checkEmpty , Transition transition , StartupOption startOpt ) throws IOException { for ( JournalManager jm : getJournalManagers ( ) ) { if ( ! ( jm instanceof FileJournalManager ) ) { if ( checkEmpty && jm . hasSomeJournalData ( ) ) { LOG . warn ( "Jo... | Transition the non - file journals . |
33,485 | public static EditLogInputStream getInputStream ( JournalManager jm , long txid ) throws IOException { List < EditLogInputStream > streams = new ArrayList < EditLogInputStream > ( ) ; jm . selectInputStreams ( streams , txid , true , false ) ; if ( streams . size ( ) < 1 ) { throw new IOException ( "Cannot obtain strea... | Get input stream from the given journal starting at txid . Does not perform validation of the streams . |
33,486 | public List < JournalManager > getNonFileJournalManagers ( ) { List < JournalManager > list = new ArrayList < JournalManager > ( ) ; for ( JournalManager jm : getJournalManagers ( ) ) { if ( ! ( jm instanceof FileJournalManager ) ) { list . add ( jm ) ; } } return list ; } | Return all non - file journal managers . |
33,487 | void attemptRestoreRemovedStorage ( ) { if ( removedStorageDirs . size ( ) == 0 ) return ; synchronized ( this . restorationLock ) { LOG . info ( "attemptRestoreRemovedStorage: check removed(failed) " + "storage. removedStorages size = " + removedStorageDirs . size ( ) ) ; for ( Iterator < StorageDirectory > it = this ... | See if any of removed storages is writable again and can be returned into service . |
33,488 | StorageDirectory getStorageDirectory ( URI uri ) { try { uri = Util . fileAsURI ( new File ( uri ) ) ; Iterator < StorageDirectory > it = dirIterator ( ) ; for ( ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; if ( Util . fileAsURI ( sd . getRoot ( ) ) . equals ( uri ) ) { return sd ; } } } catch ( IOExc... | Return the storage directory corresponding to the passed URI |
33,489 | private static void checkSchemeConsistency ( URI u ) throws IOException { String scheme = u . getScheme ( ) ; if ( scheme == null ) { throw new IOException ( "Undefined scheme for " + u ) ; } } | Checks the consistency of a URI in particular if the scheme is specified |
33,490 | int getNumStorageDirs ( NameNodeDirType dirType ) { if ( dirType == null ) return getNumStorageDirs ( ) ; Iterator < StorageDirectory > it = dirIterator ( dirType ) ; int numDirs = 0 ; for ( ; it . hasNext ( ) ; it . next ( ) ) numDirs ++ ; return numDirs ; } | Return number of storage directories of the given type . |
33,491 | Collection < File > getDirectories ( NameNodeDirType dirType ) throws IOException { ArrayList < File > list = new ArrayList < File > ( ) ; Iterator < StorageDirectory > it = ( dirType == null ) ? dirIterator ( ) : dirIterator ( dirType ) ; for ( ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; list . add ... | Return the list of locations being used for a specific purpose . i . e . Image or edit log storage . |
33,492 | static long readTransactionIdFile ( StorageDirectory sd ) throws IOException { File txidFile = getStorageFile ( sd , NameNodeFile . SEEN_TXID ) ; long txid = 0L ; if ( txidFile . exists ( ) && txidFile . canRead ( ) ) { BufferedReader br = new BufferedReader ( new FileReader ( txidFile ) ) ; try { txid = Long . valueOf... | Determine the last transaction ID noted in this storage directory . This txid is stored in a special seen_txid file since it might not correspond to the latest image or edit log . For example an image - only directory will have this txid incremented when edits logs roll even though the edits logs are in a different dir... |
33,493 | void writeTransactionIdFile ( StorageDirectory sd , long txid ) throws IOException { if ( txid < - 1 ) { throw new IOException ( "Bad txid: " + txid ) ; } File txIdFile = getStorageFile ( sd , NameNodeFile . SEEN_TXID ) ; OutputStream fos = new AtomicFileOutputStream ( txIdFile ) ; try { fos . write ( String . valueOf ... | Write the last seen transaction ID into the seen_txid file in the given storage directory .
33,494 | public void writeTransactionIdFileToStorage ( long txid , FSImage image ) throws IOException { List < StorageDirectory > badSDs = new ArrayList < StorageDirectory > ( ) ; for ( StorageDirectory sd : storageDirs ) { try { writeTransactionIdFile ( sd , txid ) ; } catch ( IOException e ) { LOG . warn ( "writeTransactionId... | Write a small file in all available storage directories that indicates that the namespace has reached some given transaction ID . |
33,495 | public File [ ] getFsImageNameCheckpoint ( long txid ) { ArrayList < File > list = new ArrayList < File > ( ) ; for ( Iterator < StorageDirectory > it = dirIterator ( NameNodeDirType . IMAGE ) ; it . hasNext ( ) ; ) { list . add ( getStorageFile ( it . next ( ) , NameNodeFile . IMAGE_NEW , txid ) ) ; } return list . to... | Return the name of the image file that is uploaded by periodic checkpointing |
33,496 | public File getFsImageName ( StorageLocationType type , long txid ) { File lastCandidate = null ; for ( Iterator < StorageDirectory > it = dirIterator ( NameNodeDirType . IMAGE ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; File fsImage = getStorageFile ( sd , NameNodeFile . IMAGE , txid ) ; if ( sd .... | Return the name of the image file preferring type images . Otherwise return any image . |
33,497 | public void format ( ) throws IOException { this . layoutVersion = FSConstants . LAYOUT_VERSION ; this . namespaceID = newNamespaceID ( ) ; this . cTime = 0L ; for ( Iterator < StorageDirectory > it = dirIterator ( ) ; it . hasNext ( ) ; ) { StorageDirectory sd = it . next ( ) ; format ( sd ) ; } } | Format all available storage directories . |
33,498 | static int newNamespaceID ( ) { Random r = new Random ( ) ; r . setSeed ( FSNamesystem . now ( ) ) ; int newID = 0 ; while ( newID == 0 ) newID = r . nextInt ( 0x7FFFFFFF ) ; return newID ; } | Generate new namespaceID . |
33,499 | String getDeprecatedProperty ( String prop ) { assert getLayoutVersion ( ) > FSConstants . LAYOUT_VERSION : "getDeprecatedProperty should only be done when loading " + "storage from past versions during upgrade." ; return deprecatedProperties . get ( prop ) ; } | Return a property that was stored in an earlier version of HDFS . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.