idx int64 0 41.2k | question stringlengths 73 5.81k | target stringlengths 5 918 |
|---|---|---|
33,000 | public INode [ ] getExistingPathINodes ( String path ) { byte [ ] [ ] components = getPathComponents ( path ) ; INode [ ] inodes = new INode [ components . length ] ; this . getExistingPathINodes ( components , inodes ) ; return inodes ; } | Retrieve the existing INodes along the given path . The first INode always exist and is this INode . |
33,001 | int nextChild ( byte [ ] name ) { if ( name . length == 0 ) { return 0 ; } int nextPos = Collections . binarySearch ( children , name ) + 1 ; if ( nextPos >= 0 ) { return nextPos ; } return - nextPos ; } | Search all children for the first child whose name is greater than the given name . |
33,002 | < T extends INode > T addNode ( String path , T newNode , boolean inheritPermission ) throws FileNotFoundException { byte [ ] [ ] pathComponents = getPathComponents ( path ) ; if ( addToParent ( pathComponents , newNode , inheritPermission , true ) == null ) return null ; return newNode ; } | Add new INode to the file tree . Find the parent and insert |
33,003 | private long [ ] computeContentSummary ( long [ ] summary , Set < Long > visitedCtx ) { if ( children != null ) { for ( INode child : children ) { if ( child . isDirectory ( ) ) { ( ( INodeDirectory ) child ) . computeContentSummary ( summary , visitedCtx ) ; } else { if ( child instanceof INodeHardLinkFile ) { long hardLinkID = ( ( INodeHardLinkFile ) child ) . getHardLinkID ( ) ; if ( visitedCtx . contains ( hardLinkID ) ) { summary [ 1 ] ++ ; continue ; } else { visitedCtx . add ( hardLinkID ) ; child . computeContentSummary ( summary ) ; } } else { child . computeContentSummary ( summary ) ; } } } } summary [ 2 ] ++ ; return summary ; } | Compute the content summary and skip calculating the visited hard link file . |
33,004 | public void countItems ( ) { itemCounts = new ItemCounts ( ) ; itemCounts . startTime = System . currentTimeMillis ( ) ; itemCounts . numDirectories = 1 ; itemCounts . numFiles = 0 ; itemCounts . numBlocks = 0 ; if ( children != null ) { for ( INode child : children ) { countItemsRecursively ( child ) ; } } itemCounts . finishTime = System . currentTimeMillis ( ) ; } | Count items under the current directory |
33,005 | static String stringifySolution ( int size , List < List < ColumnName > > solution ) { int [ ] [ ] picture = new int [ size ] [ size ] ; StringBuffer result = new StringBuffer ( ) ; for ( List < ColumnName > row : solution ) { int x = - 1 ; int y = - 1 ; int num = - 1 ; for ( ColumnName item : row ) { if ( item instanceof ColumnConstraint ) { x = ( ( ColumnConstraint ) item ) . column ; num = ( ( ColumnConstraint ) item ) . num ; } else if ( item instanceof RowConstraint ) { y = ( ( RowConstraint ) item ) . row ; } } picture [ y ] [ x ] = num ; } for ( int y = 0 ; y < size ; ++ y ) { for ( int x = 0 ; x < size ; ++ x ) { result . append ( picture [ y ] [ x ] ) ; result . append ( " " ) ; } result . append ( "\n" ) ; } return result . toString ( ) ; } | A string containing a representation of the solution . |
33,006 | private boolean [ ] generateRow ( boolean [ ] rowValues , int x , int y , int num ) { for ( int i = 0 ; i < rowValues . length ; ++ i ) { rowValues [ i ] = false ; } int xBox = ( int ) x / squareXSize ; int yBox = ( int ) y / squareYSize ; rowValues [ x * size + num - 1 ] = true ; rowValues [ size * size + y * size + num - 1 ] = true ; rowValues [ 2 * size * size + ( xBox * squareXSize + yBox ) * size + num - 1 ] = true ; rowValues [ 3 * size * size + size * x + y ] = true ; return rowValues ; } | Create a row that places num in cell x y . |
33,007 | public static void main ( String [ ] args ) throws IOException { if ( args . length == 0 ) { System . out . println ( "Include a puzzle on the command line." ) ; } for ( int i = 0 ; i < args . length ; ++ i ) { Sudoku problem = new Sudoku ( new FileInputStream ( args [ i ] ) ) ; System . out . println ( "Solving " + args [ i ] ) ; problem . solve ( ) ; } } | Solves a set of sudoku puzzles . |
33,008 | public boolean delete ( Path path , boolean recursive ) throws IOException { Path absolute = makeAbsolute ( path ) ; String srep = absolute . toUri ( ) . getPath ( ) ; if ( kfsImpl . isFile ( srep ) ) return kfsImpl . remove ( srep ) == 0 ; FileStatus [ ] dirEntries = listStatus ( absolute ) ; if ( ( ! recursive ) && ( dirEntries != null ) && ( dirEntries . length != 0 ) ) { throw new IOException ( "Directory " + path . toString ( ) + " is not empty." ) ; } if ( dirEntries != null ) { for ( int i = 0 ; i < dirEntries . length ; i ++ ) { delete ( new Path ( absolute , dirEntries [ i ] . getPath ( ) ) , recursive ) ; } } return kfsImpl . rmdir ( srep ) == 0 ; } | recursively delete the directory and its contents |
33,009 | public BlockLocation [ ] getFileBlockLocations ( FileStatus file , long start , long len ) throws IOException { if ( file == null ) { return null ; } String srep = makeAbsolute ( file . getPath ( ) ) . toUri ( ) . getPath ( ) ; String [ ] [ ] hints = kfsImpl . getDataLocation ( srep , start , len ) ; if ( hints == null ) { return null ; } BlockLocation [ ] result = new BlockLocation [ hints . length ] ; long blockSize = getDefaultBlockSize ( ) ; long length = len ; long blockStart = start ; for ( int i = 0 ; i < result . length ; ++ i ) { result [ i ] = new BlockLocation ( null , hints [ i ] , blockStart , length < blockSize ? length : blockSize ) ; blockStart += blockSize ; length -= blockSize ; } return result ; } | Return null if the file doesn t exist ; otherwise get the locations of the various chunks of the file file from KFS . |
33,010 | void verifyQuota ( long nsDelta , long dsDelta ) throws QuotaExceededException { long newCount = nsCount + nsDelta ; long newDiskspace = diskspace + dsDelta ; if ( nsDelta > 0 || dsDelta > 0 ) { if ( nsQuota >= 0 && nsQuota < newCount ) { throw new NSQuotaExceededException ( nsQuota , newCount ) ; } if ( dsQuota >= 0 && dsQuota < newDiskspace ) { throw new DSQuotaExceededException ( dsQuota , newDiskspace ) ; } } } | Verify if the namespace count disk space satisfies the quota restriction |
33,011 | public static void checkVersionUpgradable ( int oldVersion ) throws IOException { if ( oldVersion > LAST_UPGRADABLE_LAYOUT_VERSION ) { String msg = "*********** Upgrade is not supported from this older" + " version of storage to the current version." + " Please upgrade to " + LAST_UPGRADABLE_HADOOP_VERSION + " or a later version and then upgrade to current" + " version. Old layout version is " + ( oldVersion == 0 ? "'too old'" : ( "" + oldVersion ) ) + " and latest layout version this software version can" + " upgrade from is " + LAST_UPGRADABLE_LAYOUT_VERSION + ". ************" ; LOG . error ( msg ) ; throw new IOException ( msg ) ; } } | Checks if the upgrade from the given old version is supported . If no upgrade is supported it throws IncorrectVersionException . |
33,012 | protected void getFields ( Properties props , StorageDirectory sd ) throws IOException { String sv , st , sid , sct ; sv = props . getProperty ( LAYOUT_VERSION ) ; st = props . getProperty ( STORAGE_TYPE ) ; sid = props . getProperty ( NAMESPACE_ID ) ; sct = props . getProperty ( CHECK_TIME ) ; if ( sv == null || st == null || sid == null || sct == null ) throw new InconsistentFSStateException ( sd . root , "file " + STORAGE_FILE_VERSION + " is invalid." ) ; int rv = Integer . parseInt ( sv ) ; NodeType rt = NodeType . valueOf ( st ) ; int rid = Integer . parseInt ( sid ) ; long rct = Long . parseLong ( sct ) ; if ( ! storageType . equals ( rt ) || ! ( ( namespaceID == 0 ) || ( rid == 0 ) || namespaceID == rid ) ) throw new InconsistentFSStateException ( sd . root , "is incompatible with others. " + " namespaceID is " + namespaceID + " and rid is " + rid + "," + " storage type is " + storageType + " but rt is " + rt ) ; if ( rv < FSConstants . LAYOUT_VERSION ) throw new IncorrectVersionException ( rv , "storage directory " + sd . root . getCanonicalPath ( ) ) ; layoutVersion = rv ; storageType = rt ; namespaceID = rid ; cTime = rct ; } | Get common storage fields . Should be overloaded if additional fields need to be get . |
33,013 | public void writeAll ( ) throws IOException { this . layoutVersion = FSConstants . LAYOUT_VERSION ; for ( Iterator < StorageDirectory > it = storageDirs . iterator ( ) ; it . hasNext ( ) ; ) { it . next ( ) . write ( ) ; } } | Write all data storage files . |
33,014 | public void unlockAll ( ) throws IOException { for ( Iterator < StorageDirectory > it = storageDirs . iterator ( ) ; it . hasNext ( ) ; ) { it . next ( ) . unlock ( ) ; } } | Unlock all storage directories . |
33,015 | public boolean isLockSupported ( int idx ) throws IOException { StorageDirectory sd = storageDirs . get ( idx ) ; FileLock firstLock = null ; FileLock secondLock = null ; try { firstLock = sd . lock ; if ( firstLock == null ) { firstLock = sd . tryLock ( ) ; if ( firstLock == null ) return true ; } secondLock = sd . tryLock ( ) ; if ( secondLock == null ) return true ; } finally { if ( firstLock != null && firstLock != sd . lock ) { firstLock . release ( ) ; firstLock . channel ( ) . close ( ) ; } if ( secondLock != null ) { secondLock . release ( ) ; secondLock . channel ( ) . close ( ) ; } } return false ; } | Check whether underlying file system supports file locking . |
33,016 | public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; int totalSessionsToCtx = 0 , totalDeletedSessions = 0 ; for ( int i = 0 ; i < numNotifierThreads ; i ++ ) { totalSessionsToCtx += notifierThreads [ i ] . sessionsToCtx . size ( ) ; totalDeletedSessions += notifierThreads [ i ] . deletedSessions . size ( ) ; } jsonGenerator . writeNumberField ( "totalSessionsToCtx" , totalSessionsToCtx ) ; jsonGenerator . writeFieldName ( "sessionsToCtx" ) ; jsonGenerator . writeStartObject ( ) ; for ( int i = 0 ; i < numNotifierThreads ; i ++ ) { for ( ConcurrentMap . Entry < String , SessionNotificationCtx > entry : notifierThreads [ i ] . sessionsToCtx . entrySet ( ) ) { jsonGenerator . writeFieldName ( entry . getKey ( ) ) ; entry . getValue ( ) . write ( jsonGenerator ) ; } } jsonGenerator . writeEndObject ( ) ; jsonGenerator . writeNumberField ( "totalDeletedSessions" , totalDeletedSessions ) ; jsonGenerator . writeFieldName ( "deletedSessions" ) ; jsonGenerator . writeStartArray ( ) ; for ( int i = 0 ; i < numNotifierThreads ; i ++ ) { for ( String deletedSessionHandle : notifierThreads [ i ] . deletedSessions . keySet ( ) ) { jsonGenerator . writeString ( deletedSessionHandle ) ; } } jsonGenerator . writeEndArray ( ) ; jsonGenerator . writeEndObject ( ) ; } | Used to write the state of the SessionNotifier instance to disk when we are persisting the state of the ClusterManager |
33,017 | public void restoreAfterSafeModeRestart ( ) { for ( Map . Entry < String , SessionNotificationCtx > entry : sessionsToCtxFromDisk . entrySet ( ) ) { entry . getValue ( ) . setConf ( conf ) ; handleToNotifier ( entry . getKey ( ) ) . sessionsToCtx . put ( entry . getKey ( ) , entry . getValue ( ) ) ; sessionsToCtxFromDisk . remove ( entry ) ; } for ( String deletedSessionHandle : deletedSessionsFromDisk ) { SessionNotifierThread notifierThread = handleToNotifier ( deletedSessionHandle ) ; if ( notifierThread . sessionsToCtx . get ( deletedSessionHandle ) != null ) { notifierThread . deletedSessions . put ( deletedSessionHandle , notifierThread ) ; } deletedSessionsFromDisk . remove ( deletedSessionHandle ) ; } for ( int i = 0 ; i < numNotifierThreads ; i ++ ) { notifierThreads [ i ] . start ( ) ; } } | This method rebuilds members related to the SessionNotifier instance which were not directly persisted themselves . |
33,018 | public static void checkSuperuserPrivilege ( UserGroupInformation owner , String supergroup ) throws AccessControlException { PermissionChecker checker = new PermissionChecker ( owner . getUserName ( ) , supergroup ) ; if ( ! checker . isSuper ) { throw new AccessControlException ( "Access denied for user " + checker . user + ". Superuser privilege is required" ) ; } } | Verify if the caller has the required permission . This will result into an exception if the caller is not allowed to access the resource . |
33,019 | public int getMaxSlots ( TaskTrackerStatus status , TaskType type ) { return ( type == TaskType . MAP ) ? status . getMaxMapSlots ( ) : status . getMaxReduceSlots ( ) ; } | Obtain the overall number of the slots limit of a tasktracker |
33,020 | public static void rename ( FileSystem fs , String oldName , String newName ) throws IOException { Path oldDir = new Path ( oldName ) ; Path newDir = new Path ( newName ) ; if ( ! fs . rename ( oldDir , newDir ) ) { throw new IOException ( "Could not rename " + oldDir + " to " + newDir ) ; } } | Renames an existing map directory . |
33,021 | public static void delete ( FileSystem fs , String name ) throws IOException { Path dir = new Path ( name ) ; Path data = new Path ( dir , DATA_FILE_NAME ) ; Path index = new Path ( dir , INDEX_FILE_NAME ) ; fs . delete ( data , true ) ; fs . delete ( index , true ) ; fs . delete ( dir , true ) ; } | Deletes the named map file . |
33,022 | public static long fix ( FileSystem fs , Path dir , Class < ? extends Writable > keyClass , Class < ? extends Writable > valueClass , boolean dryrun , Configuration conf ) throws Exception { String dr = ( dryrun ? "[DRY RUN ] " : "" ) ; Path data = new Path ( dir , DATA_FILE_NAME ) ; Path index = new Path ( dir , INDEX_FILE_NAME ) ; int indexInterval = 128 ; if ( ! fs . exists ( data ) ) { throw new Exception ( dr + "Missing data file in " + dir + ", impossible to fix this." ) ; } if ( fs . exists ( index ) ) { return - 1 ; } SequenceFile . Reader dataReader = new SequenceFile . Reader ( fs , data , conf ) ; if ( ! dataReader . getKeyClass ( ) . equals ( keyClass ) ) { throw new Exception ( dr + "Wrong key class in " + dir + ", expected" + keyClass . getName ( ) + ", got " + dataReader . getKeyClass ( ) . getName ( ) ) ; } if ( ! dataReader . getValueClass ( ) . equals ( valueClass ) ) { throw new Exception ( dr + "Wrong value class in " + dir + ", expected" + valueClass . getName ( ) + ", got " + dataReader . getValueClass ( ) . getName ( ) ) ; } long cnt = 0L ; Writable key = ReflectionUtils . newInstance ( keyClass , conf ) ; Writable value = ReflectionUtils . newInstance ( valueClass , conf ) ; SequenceFile . Writer indexWriter = null ; if ( ! dryrun ) indexWriter = SequenceFile . createWriter ( fs , conf , index , keyClass , LongWritable . class ) ; try { long pos = 0L ; LongWritable position = new LongWritable ( ) ; while ( dataReader . next ( key , value ) ) { cnt ++ ; if ( cnt % indexInterval == 0 ) { position . set ( pos ) ; if ( ! dryrun ) indexWriter . append ( key , position ) ; } pos = dataReader . getPosition ( ) ; } } catch ( Throwable t ) { } dataReader . close ( ) ; if ( ! dryrun ) indexWriter . close ( ) ; return cnt ; } | This method attempts to fix a corrupt MapFile by re - creating its index . |
33,023 | private void findConfigFiles ( ) { if ( configFileName == null ) { String jsonConfigFileString = conf . getConfigFile ( ) . replace ( CoronaConf . DEFAULT_CONFIG_FILE , Configuration . MATERIALIZEDJSON ) ; File jsonConfigFile = new File ( jsonConfigFileString ) ; String jsonConfigFileName = null ; if ( jsonConfigFile . exists ( ) ) { jsonConfigFileName = jsonConfigFileString ; } else { URL u = classLoader . getResource ( jsonConfigFileString ) ; jsonConfigFileName = ( u != null ) ? u . getPath ( ) : null ; } if ( jsonConfigFileName != null ) { try { jsonConfigFile = new File ( jsonConfigFileName ) ; InputStream in = new BufferedInputStream ( new FileInputStream ( jsonConfigFile ) ) ; JSONObject json = conf . instantiateJsonObject ( in ) ; if ( json . has ( conf . xmlToThrift ( CoronaConf . DEFAULT_CONFIG_FILE ) ) ) { configFileName = jsonConfigFileName ; LOG . info ( "Attempt to find config file " + jsonConfigFileString + " as a file and in class loader returned " + configFileName ) ; } } catch ( IOException e ) { LOG . warn ( "IOException: " + "while parsing corona JSON configuration" ) ; } catch ( JSONException e ) { LOG . warn ( "JSONException: " + "while parsing corona JSON configuration" ) ; } } } if ( configFileName == null ) { String configFileString = conf . getConfigFile ( ) ; File configFile = new File ( configFileString ) ; if ( configFile . exists ( ) ) { configFileName = configFileString ; } else { URL u = classLoader . getResource ( configFileString ) ; configFileName = ( u != null ) ? u . getPath ( ) : null ; } LOG . info ( "Attempt to find config file " + configFileString + " as a file and in class loader returned " + configFileName ) ; } if ( poolsConfigFileName == null ) { String poolsConfigFileString = conf . getPoolsConfigFile ( ) ; File poolsConfigFile = new File ( poolsConfigFileString ) ; if ( poolsConfigFile . exists ( ) ) { poolsConfigFileName = poolsConfigFileString ; } else { URL u = classLoader . 
getResource ( poolsConfigFileString ) ; poolsConfigFileName = ( u != null ) ? u . getPath ( ) : null ; } LOG . info ( "Attempt to find pools config file " + poolsConfigFileString + " as a file and in class loader returned " + poolsConfigFileName ) ; } } | Find the configuration files as set file names or in the classpath . |
33,024 | public synchronized double getWeight ( PoolInfo poolInfo ) { Double weight = ( poolInfoToWeight == null ) ? null : poolInfoToWeight . get ( poolInfo ) ; return weight == null ? 1.0 : weight ; } | Get the weight for the pool |
33,025 | public synchronized int getPriority ( PoolInfo poolInfo ) { Integer priority = ( poolInfoToPriority == null ) ? null : poolInfoToPriority . get ( poolInfo ) ; return priority == null ? 0 : priority ; } | Get the priority for the pool |
33,026 | public synchronized ScheduleComparator getPoolComparator ( PoolInfo poolInfo ) { ScheduleComparator comparator = ( poolInfoToComparator == null ) ? null : poolInfoToComparator . get ( poolInfo ) ; return comparator == null ? defaultPoolComparator : comparator ; } | Get the comparator to use for scheduling sessions within a pool |
33,027 | public synchronized long getLocalityWait ( ResourceType type , LocalityLevel level ) { if ( level == LocalityLevel . ANY ) { return 0L ; } Long wait = level == LocalityLevel . NODE ? typeToNodeWait . get ( type ) : typeToRackWait . get ( type ) ; if ( wait == null ) { throw new IllegalArgumentException ( "Unknown type:" + type ) ; } return wait ; } | Get the locality wait to be used by the scheduler for a given ResourceType on a given LocalityLevel |
33,028 | public String generatePoolsConfigIfClassSet ( ) { if ( poolsConfigDocumentGenerator == null ) { return null ; } Document document = poolsConfigDocumentGenerator . generatePoolsDocument ( ) ; if ( document == null ) { LOG . warn ( "generatePoolsConfig: Did not generate a valid pools xml file" ) ; return null ; } File tempXmlFile ; try { TransformerFactory transformerFactory = TransformerFactory . newInstance ( ) ; transformerFactory . setAttribute ( "indent-number" , new Integer ( 2 ) ) ; Transformer transformer = transformerFactory . newTransformer ( ) ; transformer . setOutputProperty ( "{http://xml.apache.org/xslt}indent-amount" , "2" ) ; transformer . setOutputProperty ( OutputKeys . INDENT , "yes" ) ; DOMSource source = new DOMSource ( document ) ; tempXmlFile = File . createTempFile ( "tmpPoolsConfig" , "xml" ) ; if ( LOG . isDebugEnabled ( ) ) { StreamResult stdoutResult = new StreamResult ( System . out ) ; transformer . transform ( source , stdoutResult ) ; } StreamResult result = new StreamResult ( tempXmlFile ) ; transformer . transform ( source , result ) ; String md5 = org . apache . commons . codec . digest . DigestUtils . md5Hex ( new FileInputStream ( tempXmlFile ) ) ; File destXmlFile = new File ( conf . getPoolsConfigFile ( ) ) ; boolean success = tempXmlFile . renameTo ( destXmlFile ) ; LOG . info ( "generatePoolConfig: Renamed generated file " + tempXmlFile . getAbsolutePath ( ) + " to " + destXmlFile . getAbsolutePath ( ) + " returned " + success + " with md5sum " + md5 ) ; return md5 ; } catch ( TransformerConfigurationException e ) { LOG . warn ( "generatePoolConfig: Failed to write file" , e ) ; } catch ( IOException e ) { LOG . warn ( "generatePoolConfig: Failed to write file" , e ) ; } catch ( TransformerException e ) { LOG . warn ( "generatePoolConfig: Failed to write file" , e ) ; } return null ; } | Generate the new pools configuration using the configuration generator . 
The generated configuration is written to a temporary file and then atomically renamed to the specified destination file . This function may be called concurrently and it is safe to do so because of the atomic rename to the destination file . |
33,029 | public synchronized boolean reloadAllConfig ( boolean init ) throws IOException , SAXException , ParserConfigurationException , JSONException { if ( ! isConfigChanged ( init ) ) { return false ; } reloadConfig ( ) ; reloadPoolsConfig ( ) ; this . lastSuccessfulReload = ClusterManager . clock . getTime ( ) ; return true ; } | Reload all the configuration files if the config changed and set the last successful reload time . Synchronized due to potential conflict from a fetch pools config http request . |
33,030 | private boolean isConfigChanged ( boolean init ) throws IOException { if ( init && ( configFileName == null || ( poolsConfigFileName == null && conf . onlyAllowConfiguredPools ( ) ) ) ) { throw new IOException ( "ClusterManager needs a config and a " + "pools file to start" ) ; } if ( configFileName == null && poolsConfigFileName == null ) { return false ; } boolean configChanged = false ; if ( configFileName != null ) { File file = new File ( configFileName ) ; configChanged |= ( file . lastModified ( ) == 0 || file . lastModified ( ) > lastSuccessfulReload ) ; } if ( poolsConfigFileName != null ) { File file = new File ( poolsConfigFileName ) ; configChanged |= ( file . lastModified ( ) == 0 || file . lastModified ( ) > lastSuccessfulReload ) ; } return configChanged ; } | Check if the config files have changed since they were last read |
33,031 | private Element getRootElement ( String fileName ) throws IOException , SAXException , ParserConfigurationException { DocumentBuilderFactory docBuilderFactory = DocumentBuilderFactory . newInstance ( ) ; docBuilderFactory . setIgnoringComments ( true ) ; DocumentBuilder builder = docBuilderFactory . newDocumentBuilder ( ) ; Document doc = builder . parse ( new File ( fileName ) ) ; Element root = doc . getDocumentElement ( ) ; if ( ! matched ( root , CONFIGURATION_TAG_NAME ) ) { throw new IOException ( "Bad " + fileName ) ; } return root ; } | Get the root element of the XML document |
33,032 | private static String getText ( Element element ) { if ( element . getFirstChild ( ) == null ) { return "" ; } return ( ( Text ) element . getFirstChild ( ) ) . getData ( ) . trim ( ) ; } | Get the text inside of the Xml element |
33,033 | public synchronized void incrementLoad ( ResourceType type ) { Integer load = typeToTotalLoad . get ( type ) ; assert ( load != null ) ; typeToTotalLoad . put ( type , load + 1 ) ; } | Increment the number of running resources of a given type . |
33,034 | private Pair < String , String > readBalancedLine ( ) throws IOException { String line = readCountedLine ( ) ; if ( line == null ) { return null ; } while ( line . indexOf ( '\f' ) > 0 ) { line = line . substring ( line . indexOf ( '\f' ) ) ; } if ( line . length ( ) != 0 && line . charAt ( 0 ) == '\f' ) { String subjectLine = readCountedLine ( ) ; if ( subjectLine != null && subjectLine . length ( ) != 0 && apparentConfFileHeader ( line ) && apparentXMLFileStart ( subjectLine ) ) { StringBuilder sb = new StringBuilder ( ) ; while ( subjectLine != null && subjectLine . indexOf ( '\f' ) > 0 ) { subjectLine = subjectLine . substring ( subjectLine . indexOf ( '\f' ) ) ; } while ( subjectLine != null && ( subjectLine . length ( ) == 0 || subjectLine . charAt ( 0 ) != '\f' ) ) { sb . append ( subjectLine ) ; subjectLine = readCountedLine ( ) ; } if ( subjectLine != null ) { unreadCountedLine ( subjectLine ) ; } return new Pair < String , String > ( line , sb . toString ( ) ) ; } return readBalancedLine ( ) ; } String endlineString = ( version == 0 ? " " : " ." ) ; if ( line . length ( ) < endlineString . length ( ) ) { return new Pair < String , String > ( null , line ) ; } if ( ! endlineString . equals ( line . substring ( line . length ( ) - endlineString . length ( ) ) ) ) { StringBuilder sb = new StringBuilder ( line ) ; String addedLine ; do { addedLine = readCountedLine ( ) ; if ( addedLine == null ) { return new Pair < String , String > ( null , sb . toString ( ) ) ; } while ( addedLine . indexOf ( '\f' ) > 0 ) { addedLine = addedLine . substring ( addedLine . indexOf ( '\f' ) ) ; } if ( addedLine . length ( ) > 0 && addedLine . charAt ( 0 ) == '\f' ) { unreadCountedLine ( addedLine ) ; return new Pair < String , String > ( null , sb . toString ( ) ) ; } sb . append ( "\n" ) ; sb . append ( addedLine ) ; } while ( addedLine . length ( ) >= endlineString . length ( ) && ! endlineString . equals ( addedLine . substring ( addedLine . 
length ( ) - endlineString . length ( ) ) ) ) ; line = sb . toString ( ) ; } return new Pair < String , String > ( null , line ) ; } | no more input . |
33,035 | public Configuration initializeConf ( String [ ] keys , Configuration conf , FileSystem fs ) throws IOException { Configuration newConf = new Configuration ( conf ) ; if ( fs == null ) { fs = FileSystem . get ( conf ) ; } String suffix = fs . getUri ( ) . getAuthority ( ) ; for ( String key : keys ) { String value = conf . get ( key + "." + suffix ) ; if ( value != null ) { newConf . set ( key , value ) ; } } return newConf ; } | Initialize the config based on the given Filesystem |
33,036 | public void go ( EditsVisitor visitor ) throws IOException { setEditsLoader ( EditsLoader . LoaderFactory . getLoader ( visitor ) ) ; editsLoader . loadEdits ( ) ; } | Process EditLog file . |
33,037 | private void printHelp ( ) { String summary = "Usage: bin/hdfs oev [OPTIONS] -i INPUT_FILE -o OUTPUT_FILE\n" + "Offline edits viewer\n" + "Parse a Hadoop edits log file INPUT_FILE and save results\n" + "in OUTPUT_FILE.\n" + "Required command line arguments:\n" + "-i,--inputFile <arg> edits file to process, xml (case\n" + " insensitive) extension means XML format,\n" + " any other filename means binary format\n" + "-o,--outputFile <arg> Name of output file. If the specified\n" + " file exists, it will be overwritten,\n" + " format of the file is determined\n" + " by -p option\n" + "\n" + "Optional command line arguments:\n" + "-p,--processor <arg> Select which type of processor to apply\n" + " against image file, currently supported\n" + " processors are: binary (native binary format\n" + " that Hadoop uses), xml (default, XML\n" + " format), stats (prints statistics about\n" + " edits file)\n" + "-h,--help Display usage information and exit\n" + "-v,--verbose More verbose output, prints the input and\n" + " output filenames, for processors that write\n" + " to a file, also output to screen. On large\n" + " image files this will dramatically increase\n" + " processing time (default is false).\n" ; System . out . println ( summary ) ; System . out . println ( ) ; ToolRunner . printGenericCommandUsage ( System . out ) ; } | Print help . |
33,038 | public static PrintWriter initHTML ( ServletResponse response , String title ) throws IOException { response . setContentType ( "text/html" ) ; PrintWriter out = response . getWriter ( ) ; out . println ( "<html>\n" + "<link rel='stylesheet' type='text/css' href='/static/hadoop.css'>\n" + "<title>" + title + "</title>\n" + "<body>\n" + "<h1>" + title + "</h1>\n" ) ; return out ; } | Initial HTML header |
33,039 | public static String getParameter ( ServletRequest request , String name ) { String s = request . getParameter ( name ) ; if ( s == null ) { return null ; } s = s . trim ( ) ; return s . length ( ) == 0 ? null : s ; } | Get a parameter from a ServletRequest . Return null if the parameter contains only white spaces . |
33,040 | public static String percentageGraph ( int perc , int width ) throws IOException { assert perc >= 0 ; assert perc <= 100 ; StringBuilder builder = new StringBuilder ( ) ; builder . append ( "<table border=\"1px\" width=\"" ) ; builder . append ( width ) ; builder . append ( "px\"><tr>" ) ; if ( perc > 0 ) { builder . append ( "<td cellspacing=\"0\" class=\"perc_filled\" width=\"" ) ; builder . append ( perc ) ; builder . append ( "%\"></td>" ) ; } if ( perc < 100 ) { builder . append ( "<td cellspacing=\"0\" class=\"perc_nonfilled\" width=\"" ) ; builder . append ( 100 - perc ) ; builder . append ( "%\"></td>" ) ; } builder . append ( "</tr></table>" ) ; return builder . toString ( ) ; } | Generate the percentage graph and returns HTML representation string of the same . |
33,041 | public void write ( JsonGenerator jsonGenerator ) throws IOException { jsonGenerator . writeStartObject ( ) ; jsonGenerator . writeStringField ( "handle" , handle ) ; jsonGenerator . writeStringField ( "host" , host ) ; jsonGenerator . writeNumberField ( "port" , port ) ; jsonGenerator . writeNumberField ( "numPendingCalls" , pendingCalls . size ( ) ) ; jsonGenerator . writeFieldName ( "pendingCalls" ) ; jsonGenerator . writeStartArray ( ) ; for ( TBase call : pendingCalls ) { jsonGenerator . writeStartObject ( ) ; String callType = call . getClass ( ) . getName ( ) ; jsonGenerator . writeStringField ( "callType" , callType ) ; jsonGenerator . writeObjectField ( "call" , call ) ; jsonGenerator . writeEndObject ( ) ; } jsonGenerator . writeEndArray ( ) ; jsonGenerator . writeEndObject ( ) ; } | Used to write the state of the SessionNotificationCtx instance to disk when we are persisting the state of the ClusterManager |
33,042 | public boolean makeCalls ( long now ) { if ( now < nextDispatchTime ) return true ; while ( ! pendingCalls . isEmpty ( ) ) { TBase call = pendingCalls . get ( 0 ) ; try { init ( ) ; dispatchCall ( call ) ; nextDispatchTime = - 1 ; numRetries = 0 ; currentRetryInterval = retryIntervalStart ; pendingCalls . remove ( 0 ) ; } catch ( TException e ) { boolean logged = false ; if ( e instanceof TTransportException ) { TTransportException tte = ( TTransportException ) e ; Throwable cause = tte . getCause ( ) ; if ( cause != null && cause instanceof SocketTimeoutException ) { logged = true ; LOG . error ( "Call to session: " + handle + " for call: " + call . getClass ( ) . getName ( ) + ", numRetry: " + numRetries + "(retryCountMax=" + retryCountMax + ")" + " failed with SocketTimeoutException, will retry it" ) ; } } if ( ! logged ) { LOG . warn ( "Call to session: " + handle + " for call: " + call . getClass ( ) . getName ( ) + ", numRetry: " + numRetries + "(retryCountMax=" + retryCountMax + ")" + " failed with TException" , e ) ; } close ( ) ; if ( numRetries > retryCountMax ) return false ; numRetries ++ ; nextDispatchTime = now + currentRetryInterval ; currentRetryInterval *= retryIntervalFactor ; return true ; } } close ( ) ; return true ; } | make callbacks to the sessiondriver . if the function returns false then the session should be discarded |
33,043 | public static List < Class < ? extends CompressionCodec > > getCodecClasses ( Configuration conf ) { String codecsString = conf . get ( "io.compression.codecs" ) ; if ( codecsString != null ) { List < Class < ? extends CompressionCodec > > result = new ArrayList < Class < ? extends CompressionCodec > > ( ) ; StringTokenizer codecSplit = new StringTokenizer ( codecsString , "," ) ; while ( codecSplit . hasMoreElements ( ) ) { String codecSubstring = codecSplit . nextToken ( ) ; if ( codecSubstring . length ( ) != 0 ) { try { Class < ? > cls = conf . getClassByName ( codecSubstring ) ; if ( ! CompressionCodec . class . isAssignableFrom ( cls ) ) { throw new IllegalArgumentException ( "Class " + codecSubstring + " is not a CompressionCodec" ) ; } result . add ( cls . asSubclass ( CompressionCodec . class ) ) ; } catch ( ClassNotFoundException ex ) { throw new IllegalArgumentException ( "Compression codec " + codecSubstring + " not found." , ex ) ; } } } return result ; } else { return null ; } } | Get the list of codecs listed in the configuration |
33,044 | public static void setCodecClasses ( Configuration conf , List < Class > classes ) { StringBuffer buf = new StringBuffer ( ) ; Iterator < Class > itr = classes . iterator ( ) ; if ( itr . hasNext ( ) ) { Class cls = itr . next ( ) ; buf . append ( cls . getName ( ) ) ; while ( itr . hasNext ( ) ) { buf . append ( ',' ) ; buf . append ( itr . next ( ) . getName ( ) ) ; } } conf . set ( "io.compression.codecs" , buf . toString ( ) ) ; } | Sets a list of codec classes in the configuration . |
/**
 * Find the relevant compression codec for the given file based on its
 * filename suffix.
 *
 * The {@code codecs} map is keyed by reversed suffixes, so the lookup
 * reverses the filename and looks for the longest candidate prefix in the
 * sorted map.
 *
 * @param file the path to examine
 * @return the matching codec, or null if none matches
 */
public CompressionCodec getCodec(Path file) {
  CompressionCodec result = null;
  if (codecs != null) {
    String filename = file.getName();
    String reversedFilename = new StringBuffer(filename).reverse().toString();
    // headMap is exclusive of reversedFilename itself, so candidates are
    // strictly smaller keys; lastKey is the largest such candidate.
    // NOTE(review): a filename that IS exactly a registered suffix is
    // excluded by the exclusive bound -- confirm this edge case is intended.
    SortedMap<String, CompressionCodec> subMap = codecs.headMap(reversedFilename);
    if (!subMap.isEmpty()) {
      String potentialSuffix = subMap.lastKey();
      if (reversedFilename.startsWith(potentialSuffix)) {
        result = codecs.get(potentialSuffix);
      }
    }
  }
  return result;
}
33,046 | public CompressionCodec getCodecByClassName ( String classname ) { if ( codecsByClassName == null ) { return null ; } return codecsByClassName . get ( classname ) ; } | Find the relevant compression codec for the codec s canonical class name . |
33,047 | public static String removeSuffix ( String filename , String suffix ) { if ( filename . endsWith ( suffix ) ) { return filename . substring ( 0 , filename . length ( ) - suffix . length ( ) ) ; } return filename ; } | Removes a suffix from a filename if it has it . |
/**
 * A little test program: for each path argument, looks up its codec by
 * suffix and either compresses ("-in") or decompresses ("-out", the default)
 * the file, printing decompressed output to stdout.
 */
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  CompressionCodecFactory factory = new CompressionCodecFactory(conf);
  boolean encode = false;
  for (int i = 0; i < args.length; ++i) {
    if ("-in".equals(args[i])) {
      encode = true;
    } else if ("-out".equals(args[i])) {
      encode = false;
    } else {
      CompressionCodec codec = factory.getCodec(new Path(args[i]));
      if (codec == null) {
        System.out.println("Codec for " + args[i] + " not found.");
      } else {
        if (encode) {
          // Compress: read from the un-suffixed input file and write the
          // encoded bytes over the named (suffixed) path.
          CompressionOutputStream out =
              codec.createOutputStream(new java.io.FileOutputStream(args[i]));
          byte[] buffer = new byte[100];
          String inFilename = removeSuffix(args[i], codec.getDefaultExtension());
          java.io.InputStream in = new java.io.FileInputStream(inFilename);
          int len = in.read(buffer);
          while (len > 0) {
            out.write(buffer, 0, len);
            len = in.read(buffer);
          }
          in.close();
          out.close();
        } else {
          // Decompress: stream the decoded bytes to stdout.
          CompressionInputStream in =
              codec.createInputStream(new java.io.FileInputStream(args[i]));
          byte[] buffer = new byte[100];
          int len = in.read(buffer);
          while (len > 0) {
            System.out.write(buffer, 0, len);
            len = in.read(buffer);
          }
          // NOTE(review): streams are not closed on exception paths;
          // acceptable for a throwaway test program.
          in.close();
        }
      }
    }
  }
}
33,049 | public void setInsert ( Document doc ) { this . op = Op . INSERT ; this . doc = doc ; this . term = null ; } | Set the instance to be an insert operation . |
33,050 | public void setDelete ( Term term ) { this . op = Op . DELETE ; this . doc = null ; this . term = term ; } | Set the instance to be a delete operation . |
33,051 | public void setUpdate ( Document doc , Term term ) { this . op = Op . UPDATE ; this . doc = doc ; this . term = term ; } | Set the instance to be an update operation . |
33,052 | public static < T extends VersionedProtocol > ProtocolProxy < T > getProtocolProxy ( Class < T > protocol , long clientVersion , InetSocketAddress addr , Configuration conf , SocketFactory factory ) throws IOException { UserGroupInformation ugi = null ; try { ugi = UserGroupInformation . login ( conf ) ; } catch ( LoginException le ) { throw new RuntimeException ( "Couldn't login!" ) ; } return getProtocolProxy ( protocol , clientVersion , addr , ugi , conf , factory ) ; } | Construct a client - side protocol proxy that contains a set of server methods and a proxy object implementing the named protocol talking to a server at the named address . |
/**
 * Construct a client-side proxy that implements the named protocol, talking
 * to a server at the named address, and verify the server's method
 * signatures against the client's fingerprint.
 *
 * @return a ProtocolProxy carrying the server's supported methods, or with a
 *         null method set when the server predates signature support but
 *         matches the client version
 * @throws VersionMismatch when the server runs a different protocol version
 */
@SuppressWarnings("unchecked")
public static <T extends VersionedProtocol> ProtocolProxy<T> getProtocolProxy(
    Class<T> protocol, long clientVersion, InetSocketAddress addr,
    UserGroupInformation ticket, Configuration conf, SocketFactory factory,
    int rpcTimeout) throws IOException {
  T proxy = (T) Proxy.newProxyInstance(protocol.getClassLoader(),
      new Class[] { protocol },
      new Invoker(addr, ticket, conf, factory, rpcTimeout, protocol));
  String protocolName = protocol.getName();
  try {
    ProtocolSignature serverInfo = proxy.getProtocolSignature(protocolName,
        clientVersion, ProtocolSignature.getFingerprint(protocol.getMethods()));
    return new ProtocolProxy<T>(protocol, proxy, serverInfo.getMethods());
  } catch (RemoteException re) {
    IOException ioe = re.unwrapRemoteException(IOException.class);
    // Fragile: an old server (no getProtocolSignature) is detected by
    // matching the exception message text.
    // NOTE(review): getMessage() could in principle be null here -- confirm
    // the remote side always supplies a message.
    if (ioe.getMessage().startsWith(
        IOException.class.getName() + ": " + NoSuchMethodException.class.getName())) {
      long serverVersion = proxy.getProtocolVersion(protocol.getName(), clientVersion);
      if (serverVersion == clientVersion) {
        // Same version but no signature support: assume all methods exist.
        return new ProtocolProxy<T>(protocol, proxy, null);
      }
      throw new VersionMismatch(protocolName, clientVersion, serverVersion, proxy);
    }
    throw re;
  }
}
/**
 * Moves files to a bad-file directory on the same device, so that their
 * storage will not be reused, after a checksum failure.
 *
 * @return false always, indicating the failure was not handled in-band
 */
public boolean reportChecksumFailure(Path p, FSDataInputStream in, long inPos,
    FSDataInputStream sums, long sumsPos) {
  try {
    File f = ((RawLocalFileSystem) fs).pathToFile(p).getCanonicalFile();
    String device = new DF(f, getConf()).getMount();
    // Walk up to the highest writable ancestor still on the same device, so
    // the bad-files directory stays on the failing disk.
    File parent = f.getParentFile();
    File dir = null;
    while (parent != null && parent.canWrite() && parent.toString().startsWith(device)) {
      dir = parent;
      parent = parent.getParentFile();
    }
    if (dir == null) {
      throw new IOException("not able to find the highest writable parent dir");
    }
    File badDir = new File(dir, "bad_files");
    if (!badDir.mkdirs()) {
      if (!badDir.isDirectory()) {
        throw new IOException("Mkdirs failed to create " + badDir.toString());
      }
    }
    // Random suffix avoids clobbering an earlier bad copy of the same file.
    String suffix = "." + rand.nextInt();
    File badFile = new File(badDir, f.getName() + suffix);
    LOG.warn("Moving bad file " + f + " to " + badFile);
    in.close();
    // NOTE(review): renameTo results are ignored -- a failed rename is silent.
    f.renameTo(badFile);
    // Move the checksum file alongside the data file.
    File checkFile = ((RawLocalFileSystem) fs).pathToFile(getChecksumFile(p));
    checkFile.renameTo(new File(badDir, checkFile.getName() + suffix));
  } catch (IOException e) {
    LOG.warn("Error moving bad file " + p + ": " + e);
  }
  return false;
}
33,055 | public IndexRecord getIndex ( int partition ) { final int pos = partition * MapTask . MAP_OUTPUT_INDEX_RECORD_LENGTH / 8 ; return new IndexRecord ( entries . get ( pos ) , entries . get ( pos + 1 ) , entries . get ( pos + 2 ) ) ; } | Get spill offsets for given partition . |
33,056 | public void putIndex ( IndexRecord rec , int partition ) { final int pos = partition * MapTask . MAP_OUTPUT_INDEX_RECORD_LENGTH / 8 ; entries . put ( pos , rec . startOffset ) ; entries . put ( pos + 1 , rec . rawLength ) ; entries . put ( pos + 2 , rec . partLength ) ; } | Set spill offsets for given partition . |
/**
 * Write this spill record to the location provided, using a fresh pure-Java
 * CRC32 checksum implementation.
 *
 * @param loc the target path
 * @param job the job configuration supplying the filesystem
 * @throws IOException on write failure
 */
public void writeToFile(Path loc, JobConf job) throws IOException {
  writeToFile(loc, job, new PureJavaCrc32());
}
33,058 | static void printFilterInfo ( PrintWriter out , String poolFilter , String userFilter , String showAllLink ) { if ( userFilter != null || poolFilter != null ) { StringBuilder customizedInfo = new StringBuilder ( "Only showing " ) ; if ( poolFilter != null ) { customizedInfo . append ( "pool(s) " + poolFilter ) ; } if ( userFilter != null ) { if ( customizedInfo . length ( ) != 0 ) { customizedInfo . append ( " and " ) ; } customizedInfo . append ( "user(s) " + userFilter ) ; } out . printf ( "<h3>%s <a href=\"%s\">(show all pools and users)</a></h3>" , customizedInfo . toString ( ) , showAllLink ) ; } } | Print the filter information for pools and users |
/**
 * Print the administration form for preemption: an On/Off selector that
 * posts back to /fairscheduler?setPreemptionEnabled=...
 */
private void showAdminFormPreemption(PrintWriter out, boolean advancedView) {
  out.print("<h2>Task Preemption</h2>\n");
  // Carry the advanced-view flag through the round trip.
  String advParam = advancedView ? "&advanced" : "";
  out.print(generateSelect(Arrays.asList("On,Off".split(",")),
      scheduler.isPreemptionEnabled() ? "On" : "Off",
      "/fairscheduler?setPreemptionEnabled=<CHOICE>" + advParam));
}
/**
 * Print the administration form for the MemBasedLoadManager; a no-op when
 * memory-based scheduling is not in use.
 */
private void showAdminFormMemBasedLoadMgr(PrintWriter out, boolean advancedView) {
  if (!(loadMgr instanceof MemBasedLoadManager)) {
    return;
  }
  out.print("<h2>Memory Based Scheduling</h2>\n");
  MemBasedLoadManager memLoadMgr = (MemBasedLoadManager) loadMgr;
  Collection<String> possibleThresholds =
      Arrays.asList(("0,1,2,3,4,5,6,7,8,9,10,1000").split(","));
  // Round the reserved memory (MB) to the nearest whole GB for display.
  long reservedMemGB =
      (long) (memLoadMgr.getReservedPhysicalMemoryOnTT() / 1024D + 0.5);
  out.printf("<p>Reserve %s GB memory on one node.",
      generateSelect(possibleThresholds, "" + reservedMemGB,
          "/fairscheduler?setTtThreshold=<CHOICE>"
              + (advancedView ? "&advanced" : "")));
}
/**
 * Print the cluster resource utilization table; prints nothing when the
 * table is empty or cannot be generated.
 */
static void showCluster(PrintWriter out, boolean advancedView, JobTracker jobTracker) {
  String cluster = "";
  try {
    cluster = JSPUtil.generateClusterResTable(jobTracker);
    if (cluster.equals("")) {
      return;
    }
  } catch (IOException e) {
    // Best-effort UI section: swallow the error and omit the table.
    return;
  }
  out.print("<h2>Cluster Resource</h2>\n");
  out.print(cluster);
}
/**
 * Print the UI that allows changing the number of map and reduce tasks
 * assigned per heartbeat (selectable 1-10 each).
 */
private void showNumTaskPerHeartBeatOption(PrintWriter out, boolean advancedView) {
  out.print("<h2>Number of Assigned Tasks Per HeartBeat</h2>\n");
  out.printf("<p>Number of map tasks assigned per heartbeat:%s",
      generateSelect(Arrays.asList("1,2,3,4,5,6,7,8,9,10".split(",")),
          scheduler.getMapPerHeartBeat() + "",
          "/fairscheduler?setMapPerHeartBeat=<CHOICE>"
              + (advancedView ? "&advanced" : "")));
  out.printf("<p>Number of reduce tasks assigned per heartbeat:%s",
      generateSelect(Arrays.asList("1,2,3,4,5,6,7,8,9,10".split(",")),
          scheduler.getReducePerHeartBeat() + "",
          "/fairscheduler?setReducePerHeartBeat=<CHOICE>"
              + (advancedView ? "&advanced" : "")));
}
33,063 | private Collection < JobInProgress > getInitedJobs ( ) { Collection < JobInProgress > runningJobs = jobTracker . getRunningJobs ( ) ; for ( Iterator < JobInProgress > it = runningJobs . iterator ( ) ; it . hasNext ( ) ; ) { JobInProgress job = it . next ( ) ; if ( ! job . inited ( ) ) { it . remove ( ) ; } } return runningJobs ; } | Obtained all initialized jobs |
33,064 | private static void setIfUnset ( JobConf conf , String key , String value ) { if ( conf . get ( key ) == null ) { conf . set ( key , value ) ; } } | Set the configuration if it doesn t already have a value for the given key . |
33,065 | public static void main ( String [ ] args ) throws Exception { int exitCode = new Submitter ( ) . run ( args ) ; System . exit ( exitCode ) ; } | Submit a pipes job based on the command line arguments . |
/**
 * Load the predefined paths that should enable permission checking, each of
 * which is the root of a subtree whose nodes check permission. Read from the
 * "dfs.permissions.checking.paths" key (default "/"); a no-op when
 * permissions are globally disabled.
 *
 * @throws IOException if any configured path does not exist
 */
private void loadEnabledPermissionCheckingDirs(Configuration conf) throws IOException {
  if (this.isPermissionEnabled) {
    String[] permissionCheckingDirs =
        conf.getStrings("dfs.permissions.checking.paths", "/");
    int numDirs = permissionCheckingDirs.length;
    if (numDirs == 0) {
      return;
    }
    this.permissionEnabled = new INode[numDirs];
    int i = 0;
    for (String src : permissionCheckingDirs) {
      INode permissionEnabledNode = this.dir.getINode(src);
      if (permissionEnabledNode == null) {
        throw new IOException(
            "Non-existent path for disabling permission Checking: " + src);
      }
      permissionEnabled[i++] = permissionEnabledNode;
    }
  }
}
33,067 | private boolean isPermissionCheckingEnabled ( INode [ ] pathNodes ) { if ( this . isPermissionEnabled ) { if ( permissionEnabled == null ) { return false ; } for ( INode enableDir : this . permissionEnabled ) { for ( INode pathNode : pathNodes ) { if ( pathNode == enableDir ) { return true ; } } } return false ; } return false ; } | Check if a path is predefined to enable permission checking |
/**
 * Set parameters derived from the heartbeat interval: the expiry interval
 * after which a datanode is considered dead, and the minimum
 * block-invalidate limit.
 */
private void setHeartbeatInterval(long heartbeatInterval, long heartbeatRecheckInterval) {
  this.heartbeatInterval = heartbeatInterval;
  this.heartbeatRecheckInterval = heartbeatRecheckInterval;
  // A node expires after two recheck periods plus ten heartbeat intervals.
  this.heartbeatExpireInterval = 2 * heartbeatRecheckInterval + 10 * heartbeatInterval;
  // Allow at least 20 invalidations per second of heartbeat interval
  // (interval is in milliseconds).
  ReplicationConfigKeys.blockInvalidateLimit =
      Math.max(ReplicationConfigKeys.blockInvalidateLimit,
          20 * (int) (heartbeatInterval / 1000L));
}
/**
 * Stops the lease monitor: stops the monitor object, interrupts the monitor
 * thread under the write lock, then waits for the thread to terminate.
 *
 * @throws InterruptedException if interrupted while joining the thread
 */
public void stopLeaseMonitor() throws InterruptedException {
  if (lmmonitor != null) {
    lmmonitor.stop();
    InjectionHandler.processEvent(InjectionEvent.FSNAMESYSTEM_STOP_LEASEMANAGER);
  }
  if (lmthread != null) {
    // Interrupt under the write lock; join outside it to avoid blocking
    // other writers while waiting.
    writeLock();
    try {
      lmthread.interrupt();
    } finally {
      writeUnlock();
    }
    lmthread.join();
  }
}
/**
 * Close down this file system manager. Causes heartbeat and lease daemons to
 * stop; waits briefly for them to finish, but a short timeout returns
 * control back to the caller.
 */
public void close() {
  fsRunning = false;
  try {
    // Signal every background daemon to stop.
    if (pendingReplications != null) {
      pendingReplications.stop();
    }
    if (hbthread != null) {
      hbthread.interrupt();
    }
    if (underreplthread != null) {
      underreplthread.interrupt();
    }
    if (overreplthread != null) {
      overreplthread.interrupt();
    }
    if (raidEncodingTaskThread != null) {
      raidEncodingTaskThread.interrupt();
    }
    if (dnthread != null) {
      dnthread.interrupt();
    }
    if (automaticEditsRollingThread != null) {
      automaticEditsRoller.stop();
      automaticEditsRollingThread.join();
    }
    if (safeMode != null) {
      safeMode.shutdown();
    }
  } catch (Exception e) {
    LOG.warn("Exception shutting down FSNamesystem", e);
  } finally {
    // Always stop the lease manager and close the directory, even when the
    // daemon shutdown above failed.
    try {
      LOG.info("Stopping LeaseManager");
      stopLeaseMonitor();
      if (InjectionHandler.trueCondition(InjectionEvent.FSNAMESYSTEM_CLOSE_DIRECTORY)) {
        if (dir != null) {
          LOG.info("Stopping directory (fsimage, fsedits)");
          dir.close();
        }
      }
    } catch (InterruptedException ie) {
      // Best-effort shutdown: return control to the caller.
    } catch (IOException ie) {
      LOG.error("Error closing FSDirectory", ie);
      IOUtils.cleanup(LOG, dir);
    }
  }
}
/**
 * Dump all replication metadata into the specified file under
 * hadoop.log.dir (appending): blocks waiting for replication, pending
 * replications, invalidate sets, excess replicas, and a datanode dump.
 *
 * Requires superuser privilege; runs under the read lock.
 */
void metaSave(String filename) throws IOException {
  readLock();
  try {
    checkSuperuserPrivilege();
    File file = new File(System.getProperty("hadoop.log.dir"), filename);
    // NOTE(review): the writer is not closed if an exception is thrown below.
    PrintWriter out = new PrintWriter(new BufferedWriter(new FileWriter(file, true)));
    synchronized (neededReplications) {
      out.println("Metasave: Blocks waiting for replication: " + neededReplications.size());
      for (Block block : neededReplications) {
        List<DatanodeDescriptor> containingNodes = new ArrayList<DatanodeDescriptor>();
        NumberReplicas numReplicas = new NumberReplicas();
        chooseSourceDatanode(block, containingNodes, numReplicas);
        int usableReplicas = numReplicas.liveReplicas() + numReplicas.decommissionedReplicas();
        // Legend: l = live, d = decommissioned, c = corrupt, e = excess.
        out.print(block + ((usableReplicas > 0) ? "" : " MISSING")
            + " size: " + block.getNumBytes()
            + " (replicas:" + " l: " + numReplicas.liveReplicas()
            + " d: " + numReplicas.decommissionedReplicas()
            + " c: " + numReplicas.corruptReplicas()
            + " e: " + numReplicas.excessReplicas() + ") ");
        Collection<DatanodeDescriptor> corruptNodes = corruptReplicas.getNodes(block);
        for (Iterator<DatanodeDescriptor> jt = blocksMap.nodeIterator(block); jt.hasNext();) {
          DatanodeDescriptor node = jt.next();
          String state = "";
          if (corruptNodes != null && corruptNodes.contains(node)) {
            state = "(corrupt)";
          } else if (node.isDecommissioned() || node.isDecommissionInProgress()) {
            state = "(decommissioned)";
          }
          out.print(" " + node + state + " : ");
        }
        out.println("");
      }
    }
    pendingReplications.metaSave(out);
    dumpRecentInvalidateSets(out);
    dumpExcessReplicasSets(out);
    datanodeDump(out);
    out.flush();
    out.close();
  } finally {
    readUnlock();
  }
}
/**
 * Get all valid locations of the block and add the block to results. A
 * location is valid when the block is not queued for invalidation on that
 * datanode.
 *
 * @return the length of the added block; 0 if the block is not added
 */
private long addBlock(Block block, List<BlockWithLocations> results) {
  ArrayList<String> machineSet = new ArrayList<String>(blocksMap.numNodes(block));
  for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) {
    String storageID = it.next().getStorageID();
    // Skip replicas that are pending deletion on this datanode.
    LightWeightHashSet<Block> blocks = recentInvalidateSets.get(storageID);
    if (blocks == null || !blocks.contains(block)) {
      machineSet.add(storageID);
    }
  }
  if (machineSet.size() == 0) {
    return 0;
  } else {
    results.add(new BlockWithLocations(block, machineSet.toArray(new String[machineSet.size()])));
    return block.getNumBytes();
  }
}
/**
 * Set permissions for an existing file. Requires ownership of the path when
 * permission checking is enabled for it; the edit-log sync and audit logging
 * happen outside the write lock.
 */
public void setPermission(String src, FsPermission permission) throws IOException {
  INode[] inodes = null;
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot set permission for " + src, safeMode);
    }
    inodes = dir.getExistingPathINodes(src);
    if (isPermissionCheckingEnabled(inodes)) {
      checkOwner(src, inodes);
    }
    dir.setPermission(src, permission);
  } finally {
    writeUnlock();
  }
  getEditLog().logSync(false);
  if (auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(), "setPermission", src,
        null, getLastINode(inodes));
  }
}
/**
 * Set owner for an existing file. Non-superusers may not change the owner
 * and may only set a group they belong to; in permission-audit-only mode
 * violations are logged instead of rejected.
 */
public void setOwner(String src, String username, String group) throws IOException {
  INode[] inodes = null;
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot set permission for " + src, safeMode);
    }
    inodes = dir.getExistingPathINodes(src);
    if (isPermissionCheckingEnabled(inodes)) {
      FSPermissionChecker pc = checkOwner(src, inodes);
      if (!pc.isSuper) {
        // Only the superuser may change the owner.
        if (username != null && !pc.user.equals(username)) {
          if (this.permissionAuditOnly) {
            LOG.warn("PermissionAudit failed on " + src + ": non-super user cannot change owner.");
          } else {
            throw new AccessControlException("Non-super user cannot change owner.");
          }
        }
        // A non-superuser may only assign a group it belongs to.
        if (group != null && !pc.containsGroup(group)) {
          if (this.permissionAuditOnly) {
            LOG.warn("PermissionAudit failed on " + src + ": user does not belong to " + group + " .");
          } else {
            throw new AccessControlException("User does not belong to " + group + " .");
          }
        }
      }
    }
    dir.setOwner(src, username, group);
  } finally {
    writeUnlock();
  }
  getEditLog().logSync(false);
  if (auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(), "setOwner", src, null, getLastINode(inodes));
  }
}
/**
 * Updates DatanodeInfo for each LocatedBlock in locatedBlocks: rebuilds each
 * block's machine list from the current blocks map and corrupt-replica
 * state, and returns the result with transfer-protocol metadata attached.
 */
LocatedBlocksWithMetaInfo updateDatanodeInfo(LocatedBlocks locatedBlocks) throws IOException {
  // No blocks: return the input unchanged, wrapped with metadata.
  if (locatedBlocks.getLocatedBlocks().size() == 0)
    return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(),
        locatedBlocks.getLocatedBlocks(), false,
        DataTransferProtocol.DATA_TRANSFER_VERSION, getNamespaceId(),
        this.nameNode.getClientProtocolMethodsFingerprint());
  List<LocatedBlock> newBlocks = new ArrayList<LocatedBlock>();
  readLock();
  try {
    for (LocatedBlock locBlock : locatedBlocks.getLocatedBlocks()) {
      Block block = locBlock.getBlock();
      int numNodes = blocksMap.numNodes(block);
      int numCorruptNodes = countNodes(block).corruptReplicas();
      int numCorruptReplicas = corruptReplicas.numCorruptReplicas(block);
      if (numCorruptNodes != numCorruptReplicas) {
        LOG.warn("Inconsistent number of corrupt replicas for " + block
            + "blockMap has " + numCorruptNodes
            + " but corrupt replicas map has " + numCorruptReplicas);
      }
      // A block is marked corrupt only when every replica is corrupt;
      // otherwise serve only the non-corrupt replicas.
      boolean blockCorrupt = numCorruptNodes == numNodes;
      int numMachineSet = blockCorrupt ? numNodes : (numNodes - numCorruptNodes);
      DatanodeDescriptor[] machineSet = new DatanodeDescriptor[numMachineSet];
      if (numMachineSet > 0) {
        numNodes = 0;
        for (Iterator<DatanodeDescriptor> it = blocksMap.nodeIterator(block); it.hasNext();) {
          DatanodeDescriptor dn = it.next();
          boolean replicaCorrupt = corruptReplicas.isReplicaCorrupt(block, dn);
          if (blockCorrupt || (!blockCorrupt && !replicaCorrupt))
            machineSet[numNodes++] = dn;
        }
      }
      // Copy the block so the returned structure does not alias the map key.
      Block blockCopy = null;
      if (block != null) {
        blockCopy = new Block(block);
      }
      LocatedBlock newBlock = new LocatedBlock(blockCopy, machineSet, 0, blockCorrupt);
      newBlocks.add(newBlock);
    }
  } finally {
    readUnlock();
  }
  return new LocatedBlocksWithMetaInfo(locatedBlocks.getFileLength(), newBlocks,
      false, DataTransferProtocol.DATA_TRANSFER_VERSION, getNamespaceId(),
      this.nameNode.getClientProtocolMethodsFingerprint());
}
/**
 * Stores the modification and access time for this inode. The access time
 * is precise up to an hour. The transaction, if needed, is written to the
 * edits log but is not flushed.
 *
 * @throws AccessTimeException if access-time updates are disabled yet an
 *         access time (atime != -1) was requested
 */
public void setTimes(String src, long mtime, long atime) throws IOException {
  if (!accessTimeTouchable && atime != -1) {
    throw new AccessTimeException("setTimes is not allowed for accessTime");
  }
  setTimesInternal(src, mtime, atime);
  getEditLog().logSync(false);
}
33,077 | private void verifyReplication ( String src , short replication , String clientName ) throws IOException { String text = "file " + src + ( ( clientName != null ) ? " on client " + clientName : "" ) + ".\n" + "Requested replication " + replication ; if ( replication > maxReplication ) { throw new IOException ( text + " exceeds maximum " + maxReplication ) ; } if ( replication < minReplication ) { throw new IOException ( text + " is less than the required minimum " + minReplication ) ; } } | Check whether the replication parameter is within the range determined by system configuration . |
/**
 * Create a new file entry in the namespace, sync the edit log, and audit-log
 * the create.
 */
void startFile(String src, PermissionStatus permissions, String holder,
    String clientMachine, boolean overwrite, boolean createParent,
    short replication, long blockSize) throws IOException {
  INodeFileUnderConstruction file = startFileInternal(src, permissions, holder,
      clientMachine, overwrite, false, createParent, replication, blockSize);
  getEditLog().logSync(false);
  if (auditLog.isInfoEnabled()) {
    logAuditEvent(getCurrentUGI(), Server.getRemoteIp(), "create", src, null, file);
  }
}
/**
 * Recover lease: immediately revoke the lease of the current lease holder
 * and start lease recovery so that the file can be forced to be closed.
 *
 * @return true if the file is already closed (not under construction)
 */
boolean recoverLease(String src, String holder, String clientMachine,
    boolean discardLastBlock) throws IOException {
  byte[][] components = INodeDirectory.getPathComponents(src);
  writeLock();
  try {
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot recover the lease of " + src, safeMode);
    }
    INode inode = dir.getFileINode(components);
    if (inode == null) {
      throw new FileNotFoundException("File not found " + src);
    }
    // Nothing to recover: the file is already closed.
    if (!inode.isUnderConstruction()) {
      return true;
    }
    if (isPermissionEnabled) {
      INode[] inodes = dir.getExistingPathINodes(src);
      if (isPermissionCheckingEnabled(inodes)) {
        checkPathAccess(src, inodes, FsAction.WRITE);
      }
    }
    return recoverLeaseInternal(inode, src, holder, clientMachine, true, discardLastBlock);
  } finally {
    writeUnlock();
  }
}
/**
 * Stub for old callers pre-HDFS-630: delegates with no excluded datanodes.
 */
public LocatedBlock getAdditionalBlock(String src, String clientName) throws IOException {
  return getAdditionalBlock(src, clientName, null);
}
/**
 * Given information about an array of datanodes, returns an array of
 * DatanodeDescriptors for the same; if a datanode is not found, it falls
 * back to a machine-local node, then any rack-local node, then a random node
 * in the cluster. If every chosen node ended up on one rack, the relocated
 * nodes are re-picked from other racks.
 */
private DatanodeDescriptor[] findBestDatanodeInCluster(List<DatanodeInfo> infos, int replication) throws IOException {
  int targetReplication = Math.min(infos.size(), replication);
  DatanodeDescriptor[] dns = new DatanodeDescriptor[targetReplication];
  // Tracks which slots were filled by a random (rack-changing) fallback.
  boolean[] changedRacks = new boolean[targetReplication];
  boolean isOnSameRack = (clusterMap.getNumOfRacks() > 1 && targetReplication > 1);
  for (int i = 0; i < targetReplication; i++) {
    DatanodeInfo info = infos.get(i);
    // 1) Exact datanode lookup, then lookup by ip:port.
    DatanodeDescriptor node = getDatanode(info);
    if (node == null) {
      node = host2DataNodeMap.getDataNodeByIpPort(toHostPort(info));
    }
    // NOTE(review): FSNAMESYSTEM_STOP_LEASEMANAGER looks like the wrong
    // injection event for a host lookup -- confirm it is intended here.
    if (node == null && InjectionHandler.trueCondition(InjectionEvent.FSNAMESYSTEM_STOP_LEASEMANAGER, info.getHost())) {
      node = host2DataNodeMap.getDatanodeByHost(info.getHost());
    }
    if (node == null) {
      // 2) Fall back to a node on the same rack (preferring the same host),
      //    resolving the rack first when it is unknown.
      if (info.getNetworkLocation() == null || info.getNetworkLocation().equals(NetworkTopology.DEFAULT_RACK)) {
        resolveNetworkLocation(info);
      }
      List<Node> rackNodes = clusterMap.getDatanodesInRack(info.getNetworkLocation());
      if (rackNodes != null) {
        for (Node rackNode : rackNodes) {
          if (((DatanodeDescriptor) rackNode).getHost().equals(info.getHost())) {
            node = (DatanodeDescriptor) rackNode;
            break;
          }
        }
        if (node == null && !rackNodes.isEmpty()) {
          node = (DatanodeDescriptor) (rackNodes.get(r.nextInt(rackNodes.size())));
        }
      }
      if (node == null) {
        // 3) Last resort: any random node in the cluster.
        node = (DatanodeDescriptor) clusterMap.chooseRandom(NodeBase.ROOT);
        LOG.info("ChooseTarget for favored nodes: " + toString(infos) + ". Node "
            + info + " changed its rack location to " + node);
        changedRacks[i] = true;
      } else {
        changedRacks[i] = false;
      }
    }
    if (node == null) {
      throw new IOException("Could not find any node in the cluster for : " + info);
    }
    dns[i] = node;
    if (i != 0 && isOnSameRack && !clusterMap.isOnSameRack(dns[i], dns[i - 1])) {
      isOnSameRack = false;
    }
  }
  if (isOnSameRack) {
    // All targets collapsed onto one rack: move the randomly chosen ones
    // onto other racks for fault tolerance.
    for (int i = 0; i < targetReplication; i++) {
      if (changedRacks[i]) {
        dns[i] = (DatanodeDescriptor) clusterMap.chooseRandom(NodeBase.ROOT, dns[i].getNetworkLocation());
      }
    }
  }
  return dns;
}
33,082 | private void setLastBlockSize ( INodeFileUnderConstruction pendingFile ) { Block block = pendingFile . getLastBlock ( ) ; if ( block != null ) { block . setNumBytes ( pendingFile . getPreferredBlockSize ( ) ) ; } } | Set last block s block size to be the file s default block size |
/**
 * Check the last block of the file under construction; replicate it if it is
 * under-replicated, and update new-block metrics.
 */
private void replicateLastBlock(String src, INodeFileUnderConstruction file) {
  BlockInfo[] blks = file.getBlocks();
  if (blks == null || blks.length == 0) return;
  BlockInfo block = blks[blks.length - 1];
  DatanodeDescriptor[] targets = file.getValidTargets();
  final int numOfTargets = targets == null ? 0 : targets.length;
  NumberReplicas status = countNodes(block);
  int totalReplicas = status.getTotal();
  // Replicas still expected from the write pipeline but not yet reported.
  if (numOfTargets > totalReplicas) {
    pendingReplications.add(block, numOfTargets - totalReplicas);
  }
  int expectedReplicas = file.getReplication();
  if (numOfTargets < expectedReplicas || status.decommissionedReplicas != 0
      || status.corruptReplicas != 0) {
    LOG.info("Add " + block + " of " + src + " to needReplication queue: "
        + " numOfTargets = " + numOfTargets
        + " decomissionedReplicas = " + status.decommissionedReplicas
        + " corruptReplicas = " + status.corruptReplicas);
    neededReplications.add(block, status.liveReplicas, status.decommissionedReplicas, expectedReplicas);
  }
  // Metrics: count new blocks, distinguishing those created with fewer
  // targets than requested.
  if (numOfTargets < expectedReplicas) {
    if (numOfTargets == 1) {
      myFSMetrics.numNewBlocksWithOneReplica.inc();
    }
  } else {
    myFSMetrics.numNewBlocksWithoutFailure.inc();
  }
  myFSMetrics.numNewBlocks.inc();
}
/**
 * The client would like to let go of the given block: verify the lease and
 * remove the block from the file's pending-creates list.
 *
 * @return true on success
 */
public boolean abandonBlock(Block b, String src, String holder) throws IOException {
  writeLock();
  try {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b + "of file " + src);
    }
    if (isInSafeMode()) {
      // NOTE(review): "for fle" typo lives in the runtime message; left as-is.
      throw new SafeModeException("Cannot abandon block " + b + " for fle" + src, safeMode);
    }
    INodeFileUnderConstruction file = checkLease(src, holder);
    dir.removeBlock(src, file, b);
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("BLOCK* NameSystem.abandonBlock: " + b
          + " is removed from pendingCreates");
    }
    return true;
  } finally {
    writeUnlock();
  }
}
33,085 | private INodeFileUnderConstruction checkLease ( String src , String holder ) throws IOException { INodeFile file = dir . getFileINode ( src ) ; return checkLease ( src , holder , file ) ; } | make sure that we still have the lease on this file . |
33,086 | private Block allocateBlock ( String src , INode [ ] inodes ) throws IOException { Block b = new Block ( generateBlockId ( ) , 0 , 0 ) ; while ( isValidBlock ( b ) ) { b . setBlockId ( generateBlockId ( ) ) ; } b . setGenerationStamp ( getGenerationStamp ( ) ) ; b = dir . addBlock ( src , inodes , b ) ; return b ; } | Allocate a block at the given pending filename |
33,087 | private Block [ ] allocateParityBlocks ( int numParityBlocks ) throws IOException { Block [ ] blocks = new Block [ numParityBlocks ] ; for ( int i = 0 ; i < numParityBlocks ; i ++ ) { Block b = new Block ( generateBlockId ( ) , 0 , 0 ) ; while ( isValidBlock ( b ) ) { b . setBlockId ( generateBlockId ( ) ) ; } b . setGenerationStamp ( getGenerationStamp ( ) ) ; blocks [ i ] = b ; } return blocks ; } | Allocate a number of parity blocks Require a write lock |
/**
 * Check that the indicated file's blocks are present and replicated. If not,
 * return false. If checkall is true, check all blocks; otherwise check only
 * the penultimate block.
 */
boolean checkFileProgress(INodeFile v, boolean checkall) throws IOException {
  INode.enforceRegularStorageINode(v,
      "checkFileProgress is not supported for non-regular files");
  if (checkall) {
    // Closing the file: every block must reach the close-file minimum,
    // capped by the file's own replication factor.
    int closeFileReplicationMin = Math.min(v.getReplication(), this.minCloseReplication);
    for (Block block : v.getBlocks()) {
      if (!checkBlockProgress(v, block, closeFileReplicationMin)) {
        return false;
      }
    }
    return true;
  } else {
    // Adding a new block: only the penultimate block must meet the minimum.
    Block b = v.getStorage().getPenultimateBlock();
    return checkBlockProgress(v, b, minReplication);
  }
}
33,089 | void removeFromInvalidates ( String storageID ) { LightWeightHashSet < Block > blocks = recentInvalidateSets . remove ( storageID ) ; if ( blocks != null ) { pendingDeletionBlocksCount -= blocks . size ( ) ; } } | Remove a datanode from the invalidatesSet |
33,090 | void addToInvalidates ( Block b , DatanodeInfo n , boolean ackRequired ) { addToInvalidatesNoLog ( b , n , ackRequired ) ; if ( isInitialized && ! isInSafeModeInternal ( ) ) { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.addToInvalidates: " + b . getBlockName ( ) + " is added to invalidSet of " + n . getName ( ) ) ; } } | Adds block to list of blocks which will be invalidated on specified datanode and log the move |
/**
 * Adds block {@code b} to the set of blocks pending invalidation on
 * datanode {@code n}, without logging.
 *
 * @param b           block to invalidate; may be mutated (see note below)
 * @param n           target datanode
 * @param ackRequired whether the datanode must acknowledge the deletion
 */
void addToInvalidatesNoLog(Block b, DatanodeInfo n, boolean ackRequired) {
  // NOTE(review): skips invalidation entirely when the namenode is configured
  // to retry absent blocks — confirm this early-out is intentional.
  if (this.getNameNode().shouldRetryAbsentBlocks()) {
    return;
  }
  // Lazily create the per-datanode invalidation set, keyed by storage id.
  LightWeightHashSet<Block> invalidateSet = recentInvalidateSets.get(n.getStorageID());
  if (invalidateSet == null) {
    invalidateSet = new LightWeightHashSet<Block>();
    recentInvalidateSets.put(n.getStorageID(), invalidateSet);
  }
  // Caution: mutates the caller's Block object, overloading numBytes to tag
  // the block as not requiring an ACK from the datanode.
  if (!ackRequired) {
    b.setNumBytes(BlockFlags.NO_ACK);
  }
  // Count the block only if it was not already queued for this datanode.
  if (invalidateSet.add(b)) {
    pendingDeletionBlocksCount++;
  }
}
33,092 | private void addToInvalidates ( Block b , boolean ackRequired ) { StringBuilder sb = new StringBuilder ( ) ; for ( Iterator < DatanodeDescriptor > it = blocksMap . nodeIterator ( b ) ; it . hasNext ( ) ; ) { DatanodeDescriptor node = it . next ( ) ; addToInvalidatesNoLog ( b , node , ackRequired ) ; sb . append ( node . getName ( ) ) ; sb . append ( ' ' ) ; } if ( isInitialized && ! isInSafeMode ( ) ) { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.addToInvalidates: " + b . getBlockName ( ) + " is added to invalidSet of " + sb ) ; } } | Adds block to list of blocks which will be invalidated on all its datanodes . |
/**
 * Mark the replica of {@code blk} on datanode {@code dn} as corrupt.
 *
 * Locking: takes the namesystem write lock unless this call originates from
 * a parallel initial block report, in which case only the parallel-BR lock
 * is taken — presumably the parallel path is already serialized elsewhere;
 * TODO confirm.
 *
 * @param blk the block reported corrupt
 * @param dn  the datanode holding the corrupt replica
 * @param parallelInitialBlockReport true when called from a parallel initial
 *        block report (changes which lock is taken)
 * @throws IOException if {@code dn} is not a registered datanode
 */
public void markBlockAsCorrupt(Block blk, DatanodeInfo dn,
    final boolean parallelInitialBlockReport) throws IOException {
  if (!parallelInitialBlockReport) {
    writeLock();
  }
  lockParallelBRLock(parallelInitialBlockReport);
  try {
    DatanodeDescriptor node = getDatanode(dn);
    if (node == null) {
      throw new IOException("Cannot mark block" + blk.getBlockName()
          + " as corrupt because datanode " + dn.getName() + " does not exist. ");
    }
    final BlockInfo storedBlockInfo = blocksMap.getStoredBlock(blk);
    if (storedBlockInfo == null) {
      // Unknown block: nothing to mark.
      NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: "
          + "block " + blk + " could not be marked "
          + "as corrupt as it does not exists in " + "blocksMap");
    } else {
      INodeFile inode = storedBlockInfo.getINode();
      if (inode == null) {
        // Orphaned block (file already deleted): just invalidate the replica.
        NameNode.stateChangeLog.info("BLOCK NameSystem.markBlockAsCorrupt: "
            + "block " + blk + " could not be marked "
            + "as corrupt as it does not belong to " + "any file");
        addToInvalidates(storedBlockInfo, node, false);
        return;
      }
      // Record the corrupt replica; bail out if it was already recorded.
      if (!corruptReplicas.addToCorruptReplicasMap(storedBlockInfo, node)) {
        return;
      }
      NumberReplicas num = countNodes(storedBlockInfo);
      short blockReplication = inode.getBlockReplication(storedBlockInfo);
      if (num.liveReplicas() > blockReplication) {
        // Enough healthy copies exist: delete the corrupt one immediately.
        invalidateBlock(storedBlockInfo, node, true);
      } else if (isPopulatingReplQueuesInternal()) {
        // Otherwise schedule re-replication before removing the corrupt copy.
        int numCurrentReplicas = num.liveReplicas()
            + pendingReplications.getNumReplicas(storedBlockInfo);
        updateNeededReplicationQueue(storedBlockInfo, -1, numCurrentReplicas,
            num.decommissionedReplicas, node, blockReplication);
      }
    }
  } finally {
    if (!parallelInitialBlockReport) {
      writeUnlock();
    }
    unlockParallelBRLock(parallelInitialBlockReport);
  }
}
33,094 | private void invalidateBlock ( Block blk , DatanodeInfo dn , boolean ackRequired ) throws IOException { NameNode . stateChangeLog . info ( "DIR* NameSystem.invalidateBlock: " + blk + " on " + dn . getName ( ) ) ; DatanodeDescriptor node = getDatanode ( dn ) ; if ( node == null ) { throw new IOException ( "Cannot invalidate block " + blk + " because datanode " + dn . getName ( ) + " does not exist." ) ; } int count = countNodes ( blk ) . liveReplicas ( ) ; if ( count > 1 ) { addToInvalidates ( blk , dn , ackRequired ) ; removeStoredBlock ( blk , node ) ; if ( NameNode . stateChangeLog . isDebugEnabled ( ) ) { NameNode . stateChangeLog . debug ( "BLOCK* NameSystem.invalidateBlocks: " + blk + " on " + dn . getName ( ) + " listed for deletion." ) ; } } else { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.invalidateBlocks: " + blk + " on " + dn . getName ( ) + " is the only copy and was not deleted." ) ; } } | Invalidates the given block on the given datanode . |
33,095 | public boolean hardLinkTo ( String src , String dst ) throws IOException { INode dstNode = hardLinkToInternal ( src , dst ) ; getEditLog ( ) . logSync ( false ) ; if ( dstNode != null && auditLog . isInfoEnabled ( ) ) { logAuditEvent ( getCurrentUGI ( ) , Server . getRemoteIp ( ) , "hardlink" , src , dst , dstNode ) ; } return dstNode != null ; } | Create the hard link from src file to the dest file . |
/**
 * Remove {@code src} from the namespace, incrementally invalidating the
 * blocks of every file under it.
 *
 * The subtree is detached under the write lock, then its blocks are collected
 * and removed in increments of BLOCK_DELETION_INCREMENT, releasing the write
 * lock (and sleeping 1 ms) between increments so other operations can proceed.
 *
 * @param src               path to delete
 * @param inodes            pre-resolved path inodes, or null to resolve here
 * @param recursive         if false, refuse to delete a non-empty directory
 * @param enforcePermission whether to run the permission check
 * @return true if the path was deleted, false if it did not exist
 * @throws IOException on safe mode, permission failure, whitelisted path,
 *         or non-empty directory with {@code recursive == false}
 */
boolean deleteInternal(String src, INode[] inodes, boolean recursive,
    boolean enforcePermission) throws IOException {
  ArrayList<BlockInfo> collectedBlocks = new ArrayList<BlockInfo>();
  INode targetNode = null;
  // Only resolve path components if the caller did not supply inodes.
  byte[][] components = inodes == null ? INodeDirectory.getPathComponents(src) : null;
  writeLock();
  try {
    if (NameNode.stateChangeLog.isDebugEnabled()) {
      NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
    }
    if (isInSafeMode()) {
      throw new SafeModeException("Cannot delete " + src, safeMode);
    }
    if (inodes == null) {
      inodes = new INode[components.length];
      dir.rootDir.getExistingPathINodes(components, inodes);
    }
    if (enforcePermission && isPermissionEnabled && isPermissionCheckingEnabled(inodes)) {
      checkPermission(src, inodes, false, null, FsAction.WRITE, null, FsAction.ALL);
    }
    // Whitelisted paths may never be deleted.
    if (neverDeletePaths.contains(src)) {
      NameNode.stateChangeLog.warn("DIR* NameSystem.delete: "
          + " Trying to delete a whitelisted path " + src + " by user "
          + getCurrentUGI() + " from server " + Server.getRemoteIp());
      throw new IOException("Deleting a whitelisted directory is not allowed. " + src);
    }
    if ((!recursive) && (!dir.isDirEmpty(inodes[inodes.length - 1]))) {
      throw new IOException(src + " is non empty");
    }
    // Detach the subtree; the first increment of blocks lands in collectedBlocks.
    targetNode = dir.delete(src, inodes, collectedBlocks, BLOCK_DELETION_INCREMENT);
    if (targetNode == null) {
      return false; // path did not exist
    }
  } finally {
    writeUnlock();
  }
  List<INode> removedINodes = new ArrayList<INode>();
  // Incremental phase: presumably collectSubtreeBlocksAndClear clears the
  // node's name once the subtree is fully drained, terminating this loop —
  // TODO confirm against INode.collectSubtreeBlocksAndClear.
  while (targetNode.name != null) {
    collectedBlocks.clear();
    // Brief pause between increments to let other writers acquire the lock.
    try {
      Thread.sleep(1);
    } catch (InterruptedException e) {
      throw new InterruptedIOException(e.getMessage());
    }
    writeLock();
    try {
      int filesRemoved = targetNode.collectSubtreeBlocksAndClear(
          collectedBlocks, BLOCK_DELETION_INCREMENT, removedINodes);
      incrDeletedFileCount(this, filesRemoved);
      removeBlocks(collectedBlocks);
      dir.removeFromInodeMap(removedINodes);
    } finally {
      writeUnlock();
    }
  }
  collectedBlocks.clear();
  removedINodes.clear();
  return true;
}
33,097 | private void removeBlocks ( List < BlockInfo > blocks ) { if ( blocks == null ) { return ; } for ( BlockInfo b : blocks ) { removeFromExcessReplicateMap ( b ) ; neededReplications . remove ( b , - 1 ) ; corruptReplicas . removeFromCorruptReplicasMap ( b ) ; if ( pendingReplications != null ) { int replicas = pendingReplications . getNumReplicas ( b ) ; for ( int i = 0 ; i < replicas ; i ++ ) { pendingReplications . remove ( b ) ; } } addToInvalidates ( b , false ) ; blocksMap . removeBlock ( b ) ; } } | From the given list incrementally remove the blocks . Add the blocks to invalidates and set a flag that explicit ACK from DataNode is not required . This function should be used only for deleting entire files . |
/**
 * Remove the lease(s) for {@code src} and purge the given blocks.
 * Intended only for whole-file deletion, so datanode ACKs are not required.
 *
 * @param src    path whose leases (including any with this prefix) are removed
 * @param blocks blocks of the deleted file(s); may be null
 * @throws IOException if lease removal fails
 */
void removePathAndBlocks(String src, List<BlockInfo> blocks) throws IOException {
  // Must hold the write lock unless the RPC server has not started yet.
  assert (!nameNode.isRpcServerRunning() || hasWriteLock());
  leaseManager.removeLeaseWithPrefixPath(src);
  removeBlocks(blocks);
}
33,099 | void fsync ( String src , String clientName ) throws IOException { NameNode . stateChangeLog . info ( "BLOCK* NameSystem.fsync: file " + src + " for " + clientName ) ; writeLock ( ) ; try { if ( isInSafeMode ( ) ) { throw new SafeModeException ( "Cannot fsync file " + src , safeMode ) ; } INodeFileUnderConstruction pendingFile = checkLease ( src , clientName ) ; Block last = pendingFile . getLastBlock ( ) ; if ( last . getNumBytes ( ) == 0 ) { last . setNumBytes ( 1 ) ; } dir . persistBlocks ( src , pendingFile ) ; } finally { writeUnlock ( ) ; } getEditLog ( ) . logSync ( ) ; } | Persist all metadata about this file . |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.