| repo (stringlengths 1-191, ⌀) | file (stringlengths 23-351) | code (stringlengths 0-5.32M) | file_length (int64, 0-5.32M) | avg_line_length (float64, 0-2.9k) | max_line_length (int64, 0-288k) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.apache.hadoop.test.GenericTestUtils.assertGlobEquals;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.Test;
import com.google.common.base.Joiner;
/**
 * Functional tests for NNStorageRetentionManager. This suite differs from
 * {@link TestNNStorageRetentionManager} in that the latter contains only
 * unit/mock-based tests, whereas this suite starts mini-clusters, etc.
 */
public class TestNNStorageRetentionFunctional {
private static final File TEST_ROOT_DIR =
new File(MiniDFSCluster.getBaseDirectory());
private static final Log LOG = LogFactory.getLog(
TestNNStorageRetentionFunctional.class);
/**
* Test case where two directories are configured as NAME_AND_EDITS
* and one of them fails to save storage. Since the edits and image
* failure states are decoupled, the failure of image saving should
* not prevent the purging of logs from that dir.
*/
@Test
public void testPurgingWithNameEditsDirAfterFailure()
throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new HdfsConfiguration();
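// Retain no edit logs beyond what the retained images need, so purging is immediately observable.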
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
File sd0 = new File(TEST_ROOT_DIR, "nn0");
File sd1 = new File(TEST_ROOT_DIR, "nn1");
File cd0 = new File(sd0, "current");
File cd1 = new File(sd1, "current");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
Joiner.on(",").join(sd0, sd1));
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.manageNameDfsDirs(false)
.format(true).build();
NameNode nn = cluster.getNameNode();
doSaveNamespace(nn);
LOG.info("After first save, images 0 and 2 should exist in both dirs");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(0), getImageFileName(2));
assertGlobEquals(cd1, "fsimage_\\d*",
getImageFileName(0), getImageFileName(2));
assertGlobEquals(cd0, "edits_.*",
getFinalizedEditsFileName(1, 2),
getInProgressEditsFileName(3));
assertGlobEquals(cd1, "edits_.*",
getFinalizedEditsFileName(1, 2),
getInProgressEditsFileName(3));
doSaveNamespace(nn);
LOG.info("After second save, image 0 should be purged, " +
"and image 4 should exist in both.");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
assertGlobEquals(cd1, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
assertGlobEquals(cd0, "edits_.*",
getFinalizedEditsFileName(3, 4),
getInProgressEditsFileName(5));
assertGlobEquals(cd1, "edits_.*",
getFinalizedEditsFileName(3, 4),
getInProgressEditsFileName(5));
LOG.info("Failing first storage dir by chmodding it");
assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "000"));
doSaveNamespace(nn);
LOG.info("Restoring accessibility of first storage dir");
assertEquals(0, FileUtil.chmod(cd0.getAbsolutePath(), "755"));
LOG.info("nothing should have been purged in first storage dir");
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
assertGlobEquals(cd0, "edits_.*",
getFinalizedEditsFileName(3, 4),
getInProgressEditsFileName(5));
LOG.info("fsimage_2 should be purged in second storage dir");
assertGlobEquals(cd1, "fsimage_\\d*",
getImageFileName(4), getImageFileName(6));
assertGlobEquals(cd1, "edits_.*",
getFinalizedEditsFileName(5, 6),
getInProgressEditsFileName(7));
LOG.info("On next save, we should purge logs from the failed dir," +
" but not images, since the image directory is in failed state.");
doSaveNamespace(nn);
assertGlobEquals(cd1, "fsimage_\\d*",
getImageFileName(6), getImageFileName(8));
assertGlobEquals(cd1, "edits_.*",
getFinalizedEditsFileName(7, 8),
getInProgressEditsFileName(9));
assertGlobEquals(cd0, "fsimage_\\d*",
getImageFileName(2), getImageFileName(4));
assertGlobEquals(cd0, "edits_.*",
getInProgressEditsFileName(9));
} finally {
FileUtil.chmod(cd0.getAbsolutePath(), "755");
LOG.info("Shutting down...");
if (cluster != null) {
cluster.shutdown();
}
}
}
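/** Enters safe mode, saves the namespace via the NN RPC server, and leaves safe mode. */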
private static void doSaveNamespace(NameNode nn) throws IOException {
LOG.info("Saving namespace...");
nn.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
nn.getRpcServer().saveNamespace();
nn.getRpcServer().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
}
}
| 6,287 | 38.54717 | 90 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourcePolicy.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.ArrayList;
import java.util.Collection;
import org.junit.Test;
public class TestNameNodeResourcePolicy {
@Test
public void testSingleRedundantResource() {
assertTrue(testResourceScenario(1, 0, 0, 0, 1));
assertFalse(testResourceScenario(1, 0, 1, 0, 1));
}
@Test
public void testSingleRequiredResource() {
assertTrue(testResourceScenario(0, 1, 0, 0, 0));
assertFalse(testResourceScenario(0, 1, 0, 1, 0));
}
@Test
public void testMultipleRedundantResources() {
assertTrue(testResourceScenario(4, 0, 0, 0, 4));
assertFalse(testResourceScenario(4, 0, 1, 0, 4));
assertTrue(testResourceScenario(4, 0, 1, 0, 3));
assertFalse(testResourceScenario(4, 0, 2, 0, 3));
assertTrue(testResourceScenario(4, 0, 2, 0, 2));
assertFalse(testResourceScenario(4, 0, 3, 0, 2));
assertTrue(testResourceScenario(4, 0, 3, 0, 1));
assertFalse(testResourceScenario(4, 0, 4, 0, 1));
assertFalse(testResourceScenario(1, 0, 0, 0, 2));
}
@Test
public void testMultipleRequiredResources() {
assertTrue(testResourceScenario(0, 3, 0, 0, 0));
assertFalse(testResourceScenario(0, 3, 0, 1, 0));
assertFalse(testResourceScenario(0, 3, 0, 2, 0));
assertFalse(testResourceScenario(0, 3, 0, 3, 0));
}
@Test
public void testRedundantWithRequiredResources() {
assertTrue(testResourceScenario(2, 2, 0, 0, 1));
assertTrue(testResourceScenario(2, 2, 1, 0, 1));
assertFalse(testResourceScenario(2, 2, 2, 0, 1));
assertFalse(testResourceScenario(2, 2, 0, 1, 1));
assertFalse(testResourceScenario(2, 2, 1, 1, 1));
assertFalse(testResourceScenario(2, 2, 2, 1, 1));
}
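/**
 * Builds the requested mix of mocked redundant and required resources,
 * marks the first numFailedRedundantResources / numFailedRequiredResources
 * of each kind as unavailable, and reports whether
 * NameNodeResourcePolicy.areResourcesAvailable still considers the
 * NameNode healthy given minimumRedundantResources.
 */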
private static boolean testResourceScenario(
int numRedundantResources,
int numRequiredResources,
int numFailedRedundantResources,
int numFailedRequiredResources,
int minimumRedundantResources) {
Collection<CheckableNameNodeResource> resources =
new ArrayList<CheckableNameNodeResource>();
for (int i = 0; i < numRedundantResources; i++) {
CheckableNameNodeResource r = mock(CheckableNameNodeResource.class);
when(r.isRequired()).thenReturn(false);
when(r.isResourceAvailable()).thenReturn(i >= numFailedRedundantResources);
resources.add(r);
}
for (int i = 0; i < numRequiredResources; i++) {
CheckableNameNodeResource r = mock(CheckableNameNodeResource.class);
when(r.isRequired()).thenReturn(true);
when(r.isResourceAvailable()).thenReturn(i >= numFailedRequiredResources);
resources.add(r);
}
return NameNodeResourcePolicy.areResourcesAvailable(resources,
minimumRedundantResources);
}
}
| 3,736 | 35.281553 | 81 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.commons.io.FileUtils;
import com.google.common.util.concurrent.Uninterruptibles;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.top.TopConf;
import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
import org.apache.hadoop.util.VersionInfo;
import org.codehaus.jackson.map.ObjectMapper;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import java.io.File;
import java.lang.management.ManagementFactory;
import java.net.URI;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
/**
* Class for testing {@link NameNodeMXBean} implementation
*/
public class TestNameNodeMXBean {
/**
* Used to assert equality between doubles
*/
private static final double DELTA = 0.000001;
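// Use a no-op mlock cache manipulator so the tests do not require memlock privileges.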
static {
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
}
@SuppressWarnings({ "unchecked" })
@Test
public void testNameNodeMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMORY_KEY,
NativeIO.POSIX.getCacheManipulator().getMemlockLimit());
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNameNode().namesystem;
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
// get attribute "ClusterId"
String clusterId = (String) mbs.getAttribute(mxbeanName, "ClusterId");
assertEquals(fsn.getClusterId(), clusterId);
// get attribute "BlockPoolId"
String blockpoolId = (String) mbs.getAttribute(mxbeanName,
"BlockPoolId");
assertEquals(fsn.getBlockPoolId(), blockpoolId);
// get attribute "Version"
String version = (String) mbs.getAttribute(mxbeanName, "Version");
assertEquals(fsn.getVersion(), version);
assertTrue(version.equals(VersionInfo.getVersion()
+ ", r" + VersionInfo.getRevision()));
// get attribute "Used"
Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
assertEquals(fsn.getUsed(), used.longValue());
// get attribute "Total"
Long total = (Long) mbs.getAttribute(mxbeanName, "Total");
assertEquals(fsn.getTotal(), total.longValue());
// get attribute "safemode"
String safemode = (String) mbs.getAttribute(mxbeanName, "Safemode");
assertEquals(fsn.getSafemode(), safemode);
// get attribute nondfs
Long nondfs = (Long) (mbs.getAttribute(mxbeanName, "NonDfsUsedSpace"));
assertEquals(fsn.getNonDfsUsedSpace(), nondfs.longValue());
// get attribute percentremaining
Float percentremaining = (Float) (mbs.getAttribute(mxbeanName,
"PercentRemaining"));
assertEquals(fsn.getPercentRemaining(), percentremaining, DELTA);
// get attribute Totalblocks
Long totalblocks = (Long) (mbs.getAttribute(mxbeanName, "TotalBlocks"));
assertEquals(fsn.getTotalBlocks(), totalblocks.longValue());
// get attribute alivenodeinfo
String alivenodeinfo = (String) (mbs.getAttribute(mxbeanName,
"LiveNodes"));
Map<String, Map<String, Object>> liveNodes =
(Map<String, Map<String, Object>>) JSON.parse(alivenodeinfo);
assertTrue(liveNodes.size() == 2);
for (Map<String, Object> liveNode : liveNodes.values()) {
assertTrue(liveNode.containsKey("nonDfsUsedSpace"));
assertTrue(((Long)liveNode.get("nonDfsUsedSpace")) > 0);
assertTrue(liveNode.containsKey("capacity"));
assertTrue(((Long)liveNode.get("capacity")) > 0);
assertTrue(liveNode.containsKey("numBlocks"));
assertTrue(((Long)liveNode.get("numBlocks")) == 0);
}
assertEquals(fsn.getLiveNodes(), alivenodeinfo);
// get attribute deadnodeinfo
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
"DeadNodes"));
assertEquals(fsn.getDeadNodes(), deadnodeinfo);
// get attribute NodeUsage
String nodeUsage = (String) (mbs.getAttribute(mxbeanName,
"NodeUsage"));
assertEquals("Bad value for NodeUsage", fsn.getNodeUsage(), nodeUsage);
// get attribute NameJournalStatus
String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName,
"NameJournalStatus"));
assertEquals("Bad value for NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus);
// get attribute JournalTransactionInfo
String journalTxnInfo = (String) mbs.getAttribute(mxbeanName,
"JournalTransactionInfo");
assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(),
journalTxnInfo);
// get attribute "NNStarted"
String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted");
assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted);
// get attribute "CompileInfo"
String compileInfo = (String) mbs.getAttribute(mxbeanName, "CompileInfo");
assertEquals("Bad value for CompileInfo", fsn.getCompileInfo(), compileInfo);
// get attribute CorruptFiles
String corruptFiles = (String) (mbs.getAttribute(mxbeanName,
"CorruptFiles"));
assertEquals("Bad value for CorruptFiles", fsn.getCorruptFiles(), corruptFiles);
// get attribute NameDirStatuses
String nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
"NameDirStatuses"));
assertEquals(fsn.getNameDirStatuses(), nameDirStatuses);
Map<String, Map<String, String>> statusMap =
(Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
Collection<URI> nameDirUris = cluster.getNameDirs(0);
for (URI nameDirUri : nameDirUris) {
File nameDir = new File(nameDirUri);
System.out.println("Checking for the presence of " + nameDir +
" in active name dirs.");
assertTrue(statusMap.get("active").containsKey(nameDir.getAbsolutePath()));
}
assertEquals(2, statusMap.get("active").size());
assertEquals(0, statusMap.get("failed").size());
// This will cause the first dir to fail.
File failedNameDir = new File(nameDirUris.iterator().next());
assertEquals(0, FileUtil.chmod(
new File(failedNameDir, "current").getAbsolutePath(), "000"));
cluster.getNameNodeRpc().rollEditLog();
nameDirStatuses = (String) (mbs.getAttribute(mxbeanName,
"NameDirStatuses"));
statusMap = (Map<String, Map<String, String>>) JSON.parse(nameDirStatuses);
for (URI nameDirUri : nameDirUris) {
File nameDir = new File(nameDirUri);
String expectedStatus =
nameDir.equals(failedNameDir) ? "failed" : "active";
System.out.println("Checking for the presence of " + nameDir +
" in " + expectedStatus + " name dirs.");
assertTrue(statusMap.get(expectedStatus).containsKey(
nameDir.getAbsolutePath()));
}
assertEquals(1, statusMap.get("active").size());
assertEquals(1, statusMap.get("failed").size());
assertEquals(0L, mbs.getAttribute(mxbeanName, "CacheUsed"));
assertEquals(NativeIO.POSIX.getCacheManipulator().getMemlockLimit() *
cluster.getDataNodes().size(),
mbs.getAttribute(mxbeanName, "CacheCapacity"));
assertNull("RollingUpgradeInfo should be null when there is no rolling"
+ " upgrade", mbs.getAttribute(mxbeanName, "RollingUpgradeStatus"));
} finally {
if (cluster != null) {
for (URI dir : cluster.getNameDirs(0)) {
FileUtil.chmod(
new File(new File(dir), "current").getAbsolutePath(), "755");
}
cluster.shutdown();
}
}
}
@SuppressWarnings({ "unchecked" })
@Test
public void testLastContactTime() throws Exception {
Configuration conf = new Configuration();
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
MiniDFSCluster cluster = null;
FileSystem localFileSys = null;
Path dir = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNameNode().namesystem;
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
// Define include file to generate deadNodes metrics
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
dir = new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts = new StringBuilder();
for(DataNode dn : cluster.getDataNodes()) {
includeHosts.append(dn.getDisplayName()).append("\n");
}
DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
fsn.getBlockManager().getDatanodeManager().refreshNodes(conf);
cluster.stopDataNode(0);
while (fsn.getBlockManager().getDatanodeManager().getNumLiveDataNodes()
!= 2 ) {
Uninterruptibles.sleepUninterruptibly(1, TimeUnit.SECONDS);
}
// get attribute deadnodeinfo
String deadnodeinfo = (String) (mbs.getAttribute(mxbeanName,
"DeadNodes"));
assertEquals(fsn.getDeadNodes(), deadnodeinfo);
Map<String, Map<String, Object>> deadNodes =
(Map<String, Map<String, Object>>) JSON.parse(deadnodeinfo);
assertTrue(deadNodes.size() > 0);
for (Map<String, Object> deadNode : deadNodes.values()) {
assertTrue(deadNode.containsKey("lastContact"));
assertTrue(deadNode.containsKey("decommissioned"));
assertTrue(deadNode.containsKey("xferaddr"));
}
} finally {
if ((localFileSys != null) && localFileSys.exists(dir)) {
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
}
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
@SuppressWarnings("unchecked")
public void testTopUsers() throws Exception {
final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanNameFsns = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
FileSystem fs = cluster.getFileSystem();
final Path path = new Path("/");
final int NUM_OPS = 10;
for (int i=0; i< NUM_OPS; i++) {
fs.listStatus(path);
fs.setTimes(path, 0, 1);
}
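// Each iteration issues one listStatus and one setTimes, so nntop should report both op types plus the aggregate.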
String topUsers =
(String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
ObjectMapper mapper = new ObjectMapper();
Map<String, Object> map = mapper.readValue(topUsers, Map.class);
assertTrue("Could not find map key timestamp",
map.containsKey("timestamp"));
assertTrue("Could not find map key windows", map.containsKey("windows"));
List<Map<String, List<Map<String, Object>>>> windows =
(List<Map<String, List<Map<String, Object>>>>) map.get("windows");
assertEquals("Unexpected num windows", 3, windows.size());
for (Map<String, List<Map<String, Object>>> window : windows) {
final List<Map<String, Object>> ops = window.get("ops");
assertEquals("Unexpected num ops", 3, ops.size());
for (Map<String, Object> op: ops) {
final long count = Long.parseLong(op.get("totalCount").toString());
final String opType = op.get("opType").toString();
final int expected;
if (opType.equals(TopConf.ALL_CMDS)) {
expected = 2*NUM_OPS;
} else {
expected = NUM_OPS;
}
assertEquals("Unexpected total count", expected, count);
}
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
public void testTopUsersDisabled() throws Exception {
final Configuration conf = new Configuration();
// Disable nntop
conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, false);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanNameFsns = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
FileSystem fs = cluster.getFileSystem();
final Path path = new Path("/");
final int NUM_OPS = 10;
for (int i=0; i< NUM_OPS; i++) {
fs.listStatus(path);
fs.setTimes(path, 0, 1);
}
String topUsers =
(String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
assertNull("Did not expect to find TopUserOpCounts bean!", topUsers);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=120000)
public void testTopUsersNoPeriods() throws Exception {
final Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.NNTOP_ENABLED_KEY, true);
conf.set(DFSConfigKeys.NNTOP_WINDOWS_MINUTES_KEY, "");
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanNameFsns = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
FileSystem fs = cluster.getFileSystem();
final Path path = new Path("/");
final int NUM_OPS = 10;
for (int i=0; i< NUM_OPS; i++) {
fs.listStatus(path);
fs.setTimes(path, 0, 1);
}
String topUsers =
(String) (mbs.getAttribute(mxbeanNameFsns, "TopUserOpCounts"));
assertNotNull("Expected TopUserOpCounts bean!", topUsers);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 15,970 | 41.028947 | 101 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockIdManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Test various failure scenarios during saveNamespace() operation.
* Cases covered:
* <ol>
* <li>Recover from failure while saving into the second storage directory</li>
* <li>Recover from failure while moving current into lastcheckpoint.tmp</li>
* <li>Recover from failure while moving lastcheckpoint.tmp into
* previous.checkpoint</li>
* <li>Recover from failure while rolling edits file</li>
* </ol>
*/
public class TestSaveNamespace {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestSaveNamespace.class);
private static class FaultySaveImage implements Answer<Void> {
int count = 0;
boolean throwRTE = true;
// generate either a RuntimeException or IOException
public FaultySaveImage(boolean throwRTE) {
this.throwRTE = throwRTE;
}
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
StorageDirectory sd = (StorageDirectory)args[1];
if (count++ == 1) {
LOG.info("Injecting fault for sd: " + sd);
if (throwRTE) {
throw new RuntimeException("Injected fault: saveFSImage second time");
} else {
throw new IOException("Injected fault: saveFSImage second time");
}
}
LOG.info("Not injecting fault for sd: " + sd);
return (Void)invocation.callRealMethod();
}
}
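/** Fault injection points exercised by {@link #saveNamespaceWithInjectedFault(Fault)}. */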
private enum Fault {
SAVE_SECOND_FSIMAGE_RTE,
SAVE_SECOND_FSIMAGE_IOE,
SAVE_ALL_FSIMAGES,
WRITE_STORAGE_ALL,
WRITE_STORAGE_ONE
};
private void saveNamespaceWithInjectedFault(Fault fault) throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
Whitebox.setInternalState(fsn, "fsImage", spyImage);
boolean shouldFail = false; // should we expect the save operation to fail
// inject fault
switch(fault) {
case SAVE_SECOND_FSIMAGE_RTE:
// The spy throws a RuntimeException when writing to the second directory
doAnswer(new FaultySaveImage(true)).
when(spyImage).saveFSImage(
(SaveNamespaceContext)anyObject(),
(StorageDirectory)anyObject(), (NameNodeFile) anyObject());
shouldFail = false;
break;
case SAVE_SECOND_FSIMAGE_IOE:
// The spy throws an IOException when writing to the second directory
doAnswer(new FaultySaveImage(false)).
when(spyImage).saveFSImage(
(SaveNamespaceContext)anyObject(),
(StorageDirectory)anyObject(), (NameNodeFile) anyObject());
shouldFail = false;
break;
case SAVE_ALL_FSIMAGES:
// The spy throws a RuntimeException in all directories
doThrow(new RuntimeException("Injected")).
when(spyImage).saveFSImage(
(SaveNamespaceContext)anyObject(),
(StorageDirectory)anyObject(), (NameNodeFile) anyObject());
shouldFail = true;
break;
case WRITE_STORAGE_ALL:
// The spy throws an exception before writing any VERSION files
doThrow(new RuntimeException("Injected"))
.when(spyStorage).writeAll();
shouldFail = true;
break;
case WRITE_STORAGE_ONE:
// The spy throws an exception on one particular storage directory
doAnswer(new FaultySaveImage(true))
.when(spyStorage).writeProperties((StorageDirectory)anyObject());
// TODO: unfortunately this fails -- should be improved.
// See HDFS-2173.
shouldFail = true;
break;
}
try {
doAnEdit(fsn, 1);
// Save namespace - this may fail, depending on fault injected
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
fsn.saveNamespace();
if (shouldFail) {
fail("Did not fail!");
}
} catch (Exception e) {
if (! shouldFail) {
throw e;
} else {
LOG.info("Test caught expected exception", e);
}
}
fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// Should still be able to perform edits
doAnEdit(fsn, 2);
// Now shut down and restart the namesystem
originalImage.close();
fsn.close();
fsn = null;
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edits.
checkEditExists(fsn, 1);
checkEditExists(fsn, 2);
} finally {
if (fsn != null) {
fsn.close();
}
}
}
/**
* Verify that a saveNamespace command brings faulty directories
* in fs.name.dir and fs.edit.dir back online.
*/
@Test (timeout=30000)
public void testReinsertnamedirsInSavenamespace() throws Exception {
// create a configuration with the key to restore error
// directories in fs.name.dir
Configuration conf = getConf();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
FSImage spyImage = spy(originalImage);
Whitebox.setInternalState(fsn, "fsImage", spyImage);
FileSystem fs = FileSystem.getLocal(conf);
File rootDir = storage.getStorageDir(0).getRoot();
Path rootPath = new Path(rootDir.getPath(), "current");
final FsPermission permissionNone = new FsPermission((short) 0);
final FsPermission permissionAll = new FsPermission(
FsAction.ALL, FsAction.READ_EXECUTE, FsAction.READ_EXECUTE);
fs.setPermission(rootPath, permissionNone);
try {
doAnEdit(fsn, 1);
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
// Save namespace - should mark the first storage dir as faulty
// since it's not traversable.
LOG.info("Doing the first savenamespace.");
fsn.saveNamespace();
LOG.info("First savenamespace sucessful.");
assertTrue("Savenamespace should have marked one directory as bad." +
" But found " + storage.getRemovedStorageDirs().size() +
" bad directories.",
storage.getRemovedStorageDirs().size() == 1);
fs.setPermission(rootPath, permissionAll);
// The next call to savenamespace should try inserting the
// erroneous directory back to fs.name.dir. This command should
// be successful.
LOG.info("Doing the second savenamespace.");
fsn.saveNamespace();
LOG.warn("Second savenamespace sucessful.");
assertTrue("Savenamespace should have been successful in removing " +
" bad directories from Image." +
" But found " + storage.getRemovedStorageDirs().size() +
" bad directories.",
storage.getRemovedStorageDirs().size() == 0);
// Now shut down and restart the namesystem
LOG.info("Shutting down fsimage.");
originalImage.close();
fsn.close();
fsn = null;
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
LOG.info("Loading new FSmage from disk.");
fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edit.
LOG.info("Checking reloaded image.");
checkEditExists(fsn, 1);
LOG.info("Reloaded image is good.");
} finally {
if (rootDir.exists()) {
fs.setPermission(rootPath, permissionAll);
}
if (fsn != null) {
try {
fsn.close();
} catch (Throwable t) {
LOG.fatal("Failed to shut down", t);
}
}
}
}
@Test (timeout=30000)
public void testRTEWhileSavingSecondImage() throws Exception {
saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_RTE);
}
@Test (timeout=30000)
public void testIOEWhileSavingSecondImage() throws Exception {
saveNamespaceWithInjectedFault(Fault.SAVE_SECOND_FSIMAGE_IOE);
}
@Test (timeout=30000)
public void testCrashInAllImageDirs() throws Exception {
saveNamespaceWithInjectedFault(Fault.SAVE_ALL_FSIMAGES);
}
@Test (timeout=30000)
public void testCrashWhenWritingVersionFiles() throws Exception {
saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ALL);
}
@Test (timeout=30000)
public void testCrashWhenWritingVersionFileInOneDir() throws Exception {
saveNamespaceWithInjectedFault(Fault.WRITE_STORAGE_ONE);
}
/**
 * Test case where saveNamespace fails in all directories
 * and then the NN shuts down. Here we should recover from the
 * failed checkpoint, since it only affected ".ckpt" files, not
 * valid image files.
 */
@Test (timeout=30000)
public void testFailedSaveNamespace() throws Exception {
doTestFailedSaveNamespace(false);
}
/**
* Test case where saveNamespace fails in all directories, but then
* the operator restores the directories and calls it again.
* This should leave the NN in a clean state for next start.
*/
@Test (timeout=30000)
public void testFailedSaveNamespaceWithRecovery() throws Exception {
doTestFailedSaveNamespace(true);
}
/**
* Injects a failure on all storage directories while saving namespace.
*
* @param restoreStorageAfterFailure if true, will try to save again after
* clearing the failure injection
*/
public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure)
throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
final FSImage originalImage = fsn.getFSImage();
NNStorage storage = originalImage.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
NNStorage spyStorage = spy(storage);
originalImage.storage = spyStorage;
FSImage spyImage = spy(originalImage);
Whitebox.setInternalState(fsn, "fsImage", spyImage);
spyImage.storage.setStorageDirectories(
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
doThrow(new IOException("Injected fault: saveFSImage")).
when(spyImage).saveFSImage(
(SaveNamespaceContext)anyObject(),
(StorageDirectory)anyObject(), (NameNodeFile) anyObject());
try {
doAnEdit(fsn, 1);
// Save namespace
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
fsn.saveNamespace();
fail("saveNamespace did not fail even when all directories failed!");
} catch (IOException ioe) {
LOG.info("Got expected exception", ioe);
}
// Ensure that, if storage dirs come back online, things work again.
if (restoreStorageAfterFailure) {
Mockito.reset(spyImage);
spyStorage.setRestoreFailedStorage(true);
fsn.saveNamespace();
checkEditExists(fsn, 1);
}
// Now shut down and restart the NN
originalImage.close();
fsn.close();
fsn = null;
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edits.
checkEditExists(fsn, 1);
} finally {
if (fsn != null) {
fsn.close();
}
}
}
@Test (timeout=30000)
public void testSaveWhileEditsRolled() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
try {
doAnEdit(fsn, 1);
CheckpointSignature sig = fsn.rollEditLog();
LOG.warn("Checkpoint signature: " + sig);
// Do another edit
doAnEdit(fsn, 2);
// Save namespace
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fsn.saveNamespace();
// Now shut down and restart the NN
fsn.close();
fsn = null;
// Start a new namesystem, which should be able to recover
// the namespace from the previous incarnation.
fsn = FSNamesystem.loadFromDisk(conf);
// Make sure the image loaded including our edits.
checkEditExists(fsn, 1);
checkEditExists(fsn, 2);
} finally {
if (fsn != null) {
fsn.close();
}
}
}
@Test (timeout=30000)
public void testTxIdPersistence() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
try {
// We have a BEGIN_LOG_SEGMENT txn to start
assertEquals(1, fsn.getEditLog().getLastWrittenTxId());
doAnEdit(fsn, 1);
assertEquals(2, fsn.getEditLog().getLastWrittenTxId());
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fsn.saveNamespace();
// 2 more txns: END the first segment, BEGIN a new one
assertEquals(4, fsn.getEditLog().getLastWrittenTxId());
// Shut down and restart
fsn.getFSImage().close();
fsn.close();
// 1 more txn to END that segment
assertEquals(5, fsn.getEditLog().getLastWrittenTxId());
fsn = null;
fsn = FSNamesystem.loadFromDisk(conf);
// 1 more txn to start new segment on restart
assertEquals(6, fsn.getEditLog().getLastWrittenTxId());
} finally {
if (fsn != null) {
fsn.close();
}
}
}
/**
 * Test that saveNamespace succeeds when a parent directory containing a
 * file with an open lease is renamed and the destination directory
 * already exists.
 * This is a regression test for HDFS-2827.
 */
@Test
public void testSaveNamespaceWithRenamedLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
.numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
OutputStream out = null;
try {
fs.mkdirs(new Path("/test-target"));
out = fs.create(new Path("/test-source/foo")); // don't close
fs.rename(new Path("/test-source/"), new Path("/test-target/"));
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
} finally {
IOUtils.cleanup(LOG, out, fs);
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=20000)
public void testCancelSaveNamespace() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
// Replace the FSImage with a spy
final FSImage image = fsn.getFSImage();
NNStorage storage = image.getStorage();
storage.close(); // unlock any directories that FSNamesystem's initialization may have locked
storage.setStorageDirectories(
FSNamesystem.getNamespaceDirs(conf),
FSNamesystem.getNamespaceEditsDirs(conf));
FSNamesystem spyFsn = spy(fsn);
final FSNamesystem finalFsn = spyFsn;
DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
BlockIdManager bid = spy(spyFsn.getBlockIdManager());
Whitebox.setInternalState(finalFsn, "blockIdManager", bid);
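// Delay saveNamespace inside getGenerationStampV2 so the test can cancel it mid-save.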
doAnswer(delayer).when(bid).getGenerationStampV2();
ExecutorService pool = Executors.newFixedThreadPool(2);
try {
doAnEdit(fsn, 1);
final Canceler canceler = new Canceler();
// Save namespace
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
Future<Void> saverFuture = pool.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
image.saveNamespace(finalFsn, NameNodeFile.IMAGE, canceler);
return null;
}
});
// Wait until saveNamespace calls getGenerationStamp
delayer.waitForCall();
// then cancel the saveNamespace
Future<Void> cancelFuture = pool.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
canceler.cancel("cancelled");
return null;
}
});
// give the cancel call time to run
Thread.sleep(500);
// allow saveNamespace to proceed - it should check the cancel flag after
// this point and throw an exception
delayer.proceed();
cancelFuture.get();
saverFuture.get();
fail("saveNamespace did not fail even though cancelled!");
} catch (Throwable t) {
GenericTestUtils.assertExceptionContains(
"SaveNamespaceCancelledException", t);
}
LOG.info("Successfully cancelled a saveNamespace");
// Check that we have only the original image and not any
// cruft left over from half-finished images
FSImageTestUtil.logStorageContents(LOG, storage);
for (StorageDirectory sd : storage.dirIterable(null)) {
File curDir = sd.getCurrentDir();
GenericTestUtils.assertGlobEquals(curDir, "fsimage_.*",
NNStorage.getImageFileName(0),
NNStorage.getImageFileName(0) + MD5FileUtils.MD5_SUFFIX);
}
} finally {
fsn.close();
}
}
@Test (timeout=30000)
public void testSaveNamespaceWithDanglingLease() throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(new Configuration())
.numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem fs = cluster.getFileSystem();
try {
cluster.getNamesystem().leaseManager.addLease("me",
INodeId.ROOT_INODE_ID + 1);
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private void doAnEdit(FSNamesystem fsn, int id) throws IOException {
// Make an edit
fsn.mkdirs(
"/test" + id,
new PermissionStatus("test", "Test",
new FsPermission((short)0777)),
true);
}
private void checkEditExists(FSNamesystem fsn, int id) throws IOException {
// Make sure the image loaded including our edit.
assertNotNull(fsn.getFileInfo("/test" + id, false));
}
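/** Builds a configuration with two local name/edits directories and ephemeral ports. */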
private Configuration getConf() throws IOException {
String baseDir = MiniDFSCluster.getBaseDirectory();
String nameDirs = fileAsURI(new File(baseDir, "name1")) + "," +
fileAsURI(new File(baseDir, "name2"));
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDirs);
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDirs);
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
return conf;
}
}
| 22,876 | 33.767477 | 97 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAllowFormat.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.TestGenericJournalConf.DummyJournalManager;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Startup and format tests
*
*/
public class TestAllowFormat {
public static final String NAME_NODE_HOST = "localhost:";
public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
private static final Log LOG =
LogFactory.getLog(TestAllowFormat.class.getName());
private static final File DFS_BASE_DIR = new File(PathUtils.getTestDir(TestAllowFormat.class), "dfs");
private static Configuration config;
private static MiniDFSCluster cluster = null;
@BeforeClass
public static void setUp() throws Exception {
config = new Configuration();
if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
throw new IOException("Could not delete hdfs directory '" + DFS_BASE_DIR +
"'");
}
// Test has multiple name directories.
// Format should not really prompt us if one of the directories exists
// but is empty. So if the test hangs on an input, something
// could be wrong in the format prompting code. (HDFS-1636)
LOG.info("hdfsdir is " + DFS_BASE_DIR.getAbsolutePath());
File nameDir1 = new File(DFS_BASE_DIR, "name1");
File nameDir2 = new File(DFS_BASE_DIR, "name2");
// To test multiple directory handling, we pre-create one of the name directories.
nameDir1.mkdirs();
// Set multiple name directories.
config.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir1.getPath() + "," + nameDir2.getPath());
config.set(DFS_DATANODE_DATA_DIR_KEY, new File(DFS_BASE_DIR, "data").getPath());
config.set(DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(DFS_BASE_DIR, "secondary").getPath());
FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
}
/**
* clean up
*/
@AfterClass
public static void tearDown() throws Exception {
if (cluster!=null) {
cluster.shutdown();
LOG.info("Stopping mini cluster");
}
if ( DFS_BASE_DIR.exists() && !FileUtil.fullyDelete(DFS_BASE_DIR) ) {
throw new IOException("Could not delete hdfs directory in tearDown '"
+ DFS_BASE_DIR + "'");
}
}
/**
 * Start a MiniDFSCluster and try formatting with different settings.
 * @throws IOException
 * @throws InterruptedException
 */
@Test
public void testAllowFormat() throws IOException {
LOG.info("--starting mini cluster");
// manage dirs parameter set to false
NameNode nn;
// 1. Create a new cluster and format DFS
config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
cluster = new MiniDFSCluster.Builder(config).manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
assertNotNull(cluster);
nn = cluster.getNameNode();
assertNotNull(nn);
LOG.info("Mini cluster created OK");
// 2. Try formatting DFS with allowformat false.
// NOTE: the cluster must be shut down for format to work.
LOG.info("Verifying format will fail with allowformat false");
config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, false);
try {
cluster.shutdown();
NameNode.format(config);
fail("Format succeeded, when it should have failed");
} catch (IOException e) { // expected to fail
// Verify we got message we expected
assertTrue("Exception was not about formatting Namenode",
e.getMessage().startsWith("The option " +
DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY));
LOG.info("Expected failure: " + StringUtils.stringifyException(e));
LOG.info("Done verifying format will fail with allowformat false");
}
// 3. Try formatting DFS with allowformat true
LOG.info("Verifying format will succeed with allowformat true");
config.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
NameNode.format(config);
LOG.info("Done verifying format will succeed with allowformat true");
}
/**
 * Test that format is skipped for directories configured with a non-file
 * scheme.
 *
 * @throws Exception
 */
@Test
public void testFormatShouldBeIgnoredForNonFileBasedDirs() throws Exception {
Configuration conf = new HdfsConfiguration();
String logicalName = "mycluster";
// DFS_NAMENODE_RPC_ADDRESS_KEY is required to identify that the NameNode
// is configured in HA; only then is DFS_NAMENODE_SHARED_EDITS_DIR_KEY
// considered.
String localhost = "127.0.0.1";
InetSocketAddress nnAddr1 = new InetSocketAddress(localhost, 8020);
InetSocketAddress nnAddr2 = new InetSocketAddress(localhost, 9020);
HATestUtil.setFailoverConfigurations(conf, logicalName, nnAddr1, nnAddr2);
conf.set(DFS_NAMENODE_NAME_DIR_KEY,
new File(DFS_BASE_DIR, "name").getAbsolutePath());
conf.setBoolean(DFS_NAMENODE_SUPPORT_ALLOW_FORMAT_KEY, true);
conf.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_EDITS_PLUGIN_PREFIX, "dummy"),
DummyJournalManager.class.getName());
conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, "dummy://"
+ localhost + ":2181/ledgers");
conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
// An internal assert verifies the working of the test.
NameNode.format(conf);
}
}
| 7,400 | 39.222826 | 104 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.net.StaticMapping;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestDefaultBlockPlacementPolicy {
private static final short REPLICATION_FACTOR = (short) 3;
private static final int DEFAULT_BLOCK_SIZE = 1024;
private MiniDFSCluster cluster = null;
private NamenodeProtocols nameNodeRpc = null;
private FSNamesystem namesystem = null;
private PermissionStatus perm = null;
@Before
public void setup() throws IOException {
StaticMapping.resetMap();
Configuration conf = new HdfsConfiguration();
final String[] racks = { "/RACK0", "/RACK0", "/RACK2", "/RACK3", "/RACK2" };
final String[] hosts = { "/host0", "/host1", "/host2", "/host3", "/host4" };
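// Topology: two nodes on /RACK0, two on /RACK2, one on /RACK3, so a /RACK2 client can be served rack-locally.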
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).racks(racks)
.hosts(hosts).build();
cluster.waitActive();
nameNodeRpc = cluster.getNameNodeRpc();
namesystem = cluster.getNamesystem();
perm = new PermissionStatus("TestDefaultBlockPlacementPolicy", null,
FsPermission.getDefault());
}
@After
public void teardown() {
if (cluster != null) {
cluster.shutdown();
}
}
/**
 * Verify rack-local node selection for a rack-local client when no
 * node-local datanode is available.
 */
@Test
public void testLocalRackPlacement() throws Exception {
String clientMachine = "client.foo.com";
// Map client to RACK2
String clientRack = "/RACK2";
StaticMapping.addNodeToRack(clientMachine, clientRack);
testPlacement(clientMachine, clientRack);
}
/**
 * Verify random rack node selection for a remote client.
 */
@Test
public void testRandomRackSelectionForRemoteClient() throws Exception {
String clientMachine = "client.foo.com";
// Don't map client machine to any rack,
// so by default it will be treated as /default-rack
// in that case a random node should be selected as first node.
testPlacement(clientMachine, null);
}
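/**
 * Creates several files as the given client and asserts that each block is
 * allocated a full set of replicas; when clientRack is non-null, also
 * asserts that the first replica lands on the client's rack.
 */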
private void testPlacement(String clientMachine,
String clientRack) throws IOException {
// write 5 files and check block placement each time
for (int i = 0; i < 5; i++) {
String src = "/test-" + i;
// Create the file with client machine
HdfsFileStatus fileStatus = namesystem.startFile(src, perm,
clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
null, null, fileStatus.getFileId(), null);
assertEquals("Block should be allocated sufficient locations",
REPLICATION_FACTOR, locatedBlock.getLocations().length);
if (clientRack != null) {
assertEquals("First datanode should be rack local", clientRack,
locatedBlock.getLocations()[0].getNetworkLocation());
}
nameNodeRpc.abandonBlock(locatedBlock.getBlock(), fileStatus.getFileId(),
src, clientMachine);
}
}
}
| 4,634 | 36.991803 | 82 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNThroughputBenchmark.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.After;
import org.junit.Test;
public class TestNNThroughputBenchmark {
@After
public void cleanUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}
/**
* This test runs all benchmarks defined in {@link NNThroughputBenchmark}.
*/
@Test
public void testNNThroughput() throws Exception {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
DFSTestUtil.formatNameNode(conf);
String[] args = new String[] {"-op", "all"};
NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
}
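  /**
   * An illustrative sketch (not part of the original suite) showing how a
   * single operation can be benchmarked in isolation. The {@code -op create},
   * {@code -threads} and {@code -files} flags are assumed from
   * NNThroughputBenchmark's usage text; the counts are arbitrary.
   */
  @Test
  public void testNNThroughputCreateOpOnly() throws Exception {
    Configuration conf = new HdfsConfiguration();
    File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
        nameDir.getAbsolutePath());
    FileSystem.setDefaultUri(conf, "hdfs://localhost:" + 0);
    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
    DFSTestUtil.formatNameNode(conf);
    // Benchmark only the file-create operation with a small workload.
    String[] args = new String[] {"-op", "create", "-threads", "4",
        "-files", "100"};
    NNThroughputBenchmark.runBenchmark(conf, Arrays.asList(args));
  }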
}
| 2,072
| 36.017857
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSPermissionChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
import static org.apache.hadoop.fs.permission.AclEntryType.MASK;
import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.EXECUTE;
import static org.apache.hadoop.fs.permission.FsAction.NONE;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
import static org.apache.hadoop.fs.permission.FsAction.WRITE;
import static org.apache.hadoop.fs.permission.FsAction.WRITE_EXECUTE;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.junit.Assert.fail;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.mock;
import java.io.IOException;
import java.util.Arrays;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.Before;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* Unit tests covering FSPermissionChecker. All tests in this suite have been
* cross-validated against Linux setfacl/getfacl to check for consistency of the
* HDFS implementation.
*/
public class TestFSPermissionChecker {
private static final long PREFERRED_BLOCK_SIZE = 128 * 1024 * 1024;
private static final short REPLICATION = 3;
private static final String SUPERGROUP = "supergroup";
private static final String SUPERUSER = "superuser";
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { "sales" });
private static final UserGroupInformation CLARK =
UserGroupInformation.createUserForTesting("clark", new String[] { "execs" });
private FSDirectory dir;
private INodeDirectory inodeRoot;
@Before
public void setUp() throws IOException {
Configuration conf = new Configuration();
FSNamesystem fsn = mock(FSNamesystem.class);
doAnswer(new Answer() {
@Override
public Object answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
FsPermission perm = (FsPermission) args[0];
return new PermissionStatus(SUPERUSER, SUPERGROUP, perm);
}
}).when(fsn).createFsOwnerPermissions(any(FsPermission.class));
dir = new FSDirectory(fsn, conf);
inodeRoot = dir.getRoot();
}
@Test
public void testAclOwner() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ);
assertPermissionGranted(BRUCE, "/file1", WRITE);
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionDenied(BRUCE, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
}
@Test
public void testAclNamedUser() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedUserDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
}
@Test
public void testAclNamedUserTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", NONE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedUserMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0620);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, WRITE),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0604);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, MASK, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupTraverseDenyOnlyDefaultEntries() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, "sales", NONE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", READ);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/dir1/file1", ALL);
}
@Test
public void testAclGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroup() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0640);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, NONE));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclNamedGroupDeny() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "execs", NONE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
@Test
public void testAclNamedGroupTraverseDeny() throws IOException {
INodeDirectory inodeDir = createINodeDirectory(inodeRoot, "dir1", "bruce",
"execs", (short)0755);
INodeFile inodeFile = createINodeFile(inodeDir, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeDir,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", NONE),
aclEntry(ACCESS, MASK, READ_EXECUTE),
aclEntry(ACCESS, OTHER, READ_EXECUTE));
assertPermissionGranted(BRUCE, "/dir1/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", READ);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/dir1/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/dir1/file1", ALL);
}
@Test
public void testAclNamedGroupMask() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "execs",
(short)0644);
addAcl(inodeFile,
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, GROUP, "sales", READ_WRITE),
aclEntry(ACCESS, MASK, READ),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", READ_WRITE);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionGranted(DIANA, "/file1", READ);
assertPermissionDenied(DIANA, "/file1", WRITE);
assertPermissionDenied(DIANA, "/file1", EXECUTE);
assertPermissionDenied(DIANA, "/file1", READ_WRITE);
assertPermissionDenied(DIANA, "/file1", READ_EXECUTE);
assertPermissionDenied(DIANA, "/file1", WRITE_EXECUTE);
assertPermissionDenied(DIANA, "/file1", ALL);
}
@Test
public void testAclOther() throws IOException {
INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "sales",
(short)0774);
addAcl(inodeFile,
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, READ));
assertPermissionGranted(BRUCE, "/file1", ALL);
assertPermissionGranted(DIANA, "/file1", ALL);
assertPermissionGranted(CLARK, "/file1", READ);
assertPermissionDenied(CLARK, "/file1", WRITE);
assertPermissionDenied(CLARK, "/file1", EXECUTE);
assertPermissionDenied(CLARK, "/file1", READ_WRITE);
assertPermissionDenied(CLARK, "/file1", READ_EXECUTE);
assertPermissionDenied(CLARK, "/file1", WRITE_EXECUTE);
assertPermissionDenied(CLARK, "/file1", ALL);
}
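  /**
   * An illustrative sketch (not part of the original suite): under POSIX ACL
   * semantics, a user belonging to several groups that each match a named
   * group entry is granted access if any matching entry (after masking)
   * grants it. The user "hal" and the owning group "staff" are hypothetical
   * names introduced for this example.
   */
  @Test
  public void testAclMultipleMatchingNamedGroups() throws IOException {
    UserGroupInformation hal = UserGroupInformation.createUserForTesting(
        "hal", new String[] { "sales", "execs" });
    INodeFile inodeFile = createINodeFile(inodeRoot, "file1", "bruce", "staff",
        (short)0640);
    addAcl(inodeFile,
        aclEntry(ACCESS, USER, READ_WRITE),
        aclEntry(ACCESS, GROUP, NONE),
        aclEntry(ACCESS, GROUP, "sales", NONE),
        aclEntry(ACCESS, GROUP, "execs", READ),
        aclEntry(ACCESS, MASK, READ),
        aclEntry(ACCESS, OTHER, NONE));
    // "sales" denies READ but "execs" grants it, so READ is granted overall.
    assertPermissionGranted(hal, "/file1", READ);
    assertPermissionDenied(hal, "/file1", WRITE);
  }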
private void addAcl(INodeWithAdditionalFields inode, AclEntry... acl)
throws IOException {
AclStorage.updateINodeAcl(inode,
Arrays.asList(acl), Snapshot.CURRENT_STATE_ID);
}
private void assertPermissionGranted(UserGroupInformation user, String path,
FsAction access) throws IOException {
INodesInPath iip = dir.getINodesInPath(path, true);
dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
false, null, null, access, null, false);
}
private void assertPermissionDenied(UserGroupInformation user, String path,
FsAction access) throws IOException {
try {
INodesInPath iip = dir.getINodesInPath(path, true);
dir.getPermissionChecker(SUPERUSER, SUPERGROUP, user).checkPermission(iip,
false, null, null, access, null, false);
fail("expected AccessControlException for user + " + user + ", path = " +
path + ", access = " + access);
} catch (AccessControlException e) {
assertTrue("Permission denied messages must carry the username",
        e.getMessage().contains(user.getUserName()));
assertTrue("Permission denied messages must carry the path parent",
e.getMessage().contains(
new Path(path).getParent().toUri().getPath()));
}
}
private static INodeDirectory createINodeDirectory(INodeDirectory parent,
String name, String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeDirectory inodeDirectory = new INodeDirectory(
HdfsConstants.GRANDFATHER_INODE_ID, name.getBytes("UTF-8"), permStatus, 0L);
parent.addChild(inodeDirectory);
return inodeDirectory;
}
private static INodeFile createINodeFile(INodeDirectory parent, String name,
String owner, String group, short perm) throws IOException {
PermissionStatus permStatus = PermissionStatus.createImmutable(owner, group,
FsPermission.createImmutable(perm));
INodeFile inodeFile = new INodeFile(HdfsConstants.GRANDFATHER_INODE_ID,
name.getBytes("UTF-8"), permStatus, 0L, 0L, null, REPLICATION,
PREFERRED_BLOCK_SIZE, (byte)0);
parent.addChild(inodeFile);
return inodeFile;
}
}
| 19,467
| 42.358575
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestQuotaByStorageType.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
public class TestQuotaByStorageType {
private static final int BLOCKSIZE = 1024;
private static final short REPLICATION = 3;
private static final long seed = 0L;
private static final Path dir = new Path("/TestQuotaByStorageType");
private MiniDFSCluster cluster;
private FSDirectory fsdir;
private DistributedFileSystem dfs;
private FSNamesystem fsn;
protected static final Log LOG = LogFactory.getLog(TestQuotaByStorageType.class);
@Before
public void setUp() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
    // Set up a 3-datanode cluster, configuring each node
    // with 1 SSD and 1 DISK and no capacity limits
cluster = new MiniDFSCluster
.Builder(conf)
.numDataNodes(REPLICATION)
.storageTypes(new StorageType[]{StorageType.SSD, StorageType.DEFAULT})
.build();
cluster.waitActive();
fsdir = cluster.getNamesystem().getFSDirectory();
dfs = cluster.getFileSystem();
fsn = cluster.getNamesystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateOneSSD() throws Exception {
testQuotaByStorageTypeWithFileCreateCase(
HdfsConstants.ONESSD_STORAGE_POLICY_NAME,
StorageType.SSD,
(short)1);
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAllSSD() throws Exception {
testQuotaByStorageTypeWithFileCreateCase(
HdfsConstants.ALLSSD_STORAGE_POLICY_NAME,
StorageType.SSD,
(short)3);
}
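  /**
   * An illustrative sketch (not part of the original suite): with the HOT
   * policy all replicas are placed on DISK, so a DISK quota should observe
   * fileLen * 3 consumption. Assumes HOT_STORAGE_POLICY_NAME is available in
   * this version's HdfsConstants.
   */
  @Test(timeout = 60000)
  public void testQuotaByStorageTypeWithFileCreateHot() throws Exception {
    testQuotaByStorageTypeWithFileCreateCase(
        HdfsConstants.HOT_STORAGE_POLICY_NAME,
        StorageType.DISK,
        (short)3);
  }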
void testQuotaByStorageTypeWithFileCreateCase(
String storagePolicy, StorageType storageType, short replication) throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
// set storage policy on directory "foo" to storagePolicy
dfs.setStoragePolicy(foo, storagePolicy);
// set quota by storage type on directory "foo"
dfs.setQuotaByStorageType(foo, storageType, BLOCKSIZE * 10);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
    // Create file of size 2.5 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify space consumed and remaining quota
long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(storageType);
assertEquals(file1Len * replication, storageTypeConsumed);
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateAppend() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
// set storage policy on directory "foo" to ONESSD
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on directory "foo"
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify space consumed and remaining quota
long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// append several blocks
int appendLen = BLOCKSIZE * 2;
DFSTestUtil.appendFile(dfs, createdFile1, appendLen);
file1Len += appendLen;
ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateDelete() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on directory "foo"
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create file of size 2.5 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify space consumed and remaining quota
long storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, storageTypeConsumed);
// Delete file and verify the consumed space of the storage type is updated
dfs.delete(createdFile1, false);
storageTypeConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(0, storageTypeConsumed);
QuotaCounts counts = fnode.computeQuotaUsage(
fsn.getBlockManager().getStoragePolicySuite(), true);
assertEquals(fnode.dumpTreeRecursively().toString(), 0,
counts.getTypeSpaces().get(StorageType.SSD));
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), 0);
assertEquals(cs.getTypeConsumed(StorageType.SSD), 0);
assertEquals(cs.getTypeConsumed(StorageType.DISK), 0);
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateRename() throws Exception {
final Path foo = new Path(dir, "foo");
dfs.mkdirs(foo);
Path createdFile1foo = new Path(foo, "created_file1.data");
final Path bar = new Path(dir, "bar");
dfs.mkdirs(bar);
Path createdFile1bar = new Path(bar, "created_file1.data");
// set storage policy on directory "foo" and "bar" to ONESSD
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setStoragePolicy(bar, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on directory "foo"
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
dfs.setQuotaByStorageType(bar, StorageType.SSD, BLOCKSIZE * 2);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create file of size 3 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 3;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1foo, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify space consumed and remaining quota
long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
    // Attempt to move the file from foo to bar. The rename should fail
    // because bar's SSD quota (2 * BLOCKSIZE) is smaller than the file
    // size (3 * BLOCKSIZE).
try {
dfs.rename(createdFile1foo, createdFile1bar);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
  /**
   * Test that the quota is correctly updated for file creation even when
   * QuotaByStorageTypeExceededException is thrown.
   */
@Test(timeout = 60000)
public void testQuotaByStorageTypeExceptionWithFileCreate() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create the 1st file of size 2 * BLOCKSIZE under directory "foo" and expect no exception
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, currentSSDConsumed);
// Create the 2nd file of size 1.5 * BLOCKSIZE under directory "foo" and expect no exception
Path createdFile2 = new Path(foo, "created_file2.data");
long file2Len = BLOCKSIZE + BLOCKSIZE / 2;
DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, REPLICATION, seed);
currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len + file2Len, currentSSDConsumed);
// Create the 3rd file of size BLOCKSIZE under directory "foo" and expect quota exceeded exception
Path createdFile3 = new Path(foo, "created_file3.data");
long file3Len = BLOCKSIZE;
try {
DFSTestUtil.createFile(dfs, createdFile3, bufLen, file3Len, BLOCKSIZE, REPLICATION, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len + file2Len, currentSSDConsumed);
}
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOffChildOff() throws Exception {
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// Create file of size 2.5 * BLOCKSIZE under child directory.
    // Since neither the parent nor the child directory has an SSD quota set,
    // expect the creation to succeed without an exception.
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
REPLICATION, seed);
// Verify SSD usage at the root level as both parent/child don't have DirectoryWithQuotaFeature
INode fnode = fsdir.getINode4Write("/");
long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOffChildOn() throws Exception {
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(child, StorageType.SSD, 2 * BLOCKSIZE);
    // Create file of size 2.5 * BLOCKSIZE under the child directory.
    // Since the child directory has an SSD quota of 2 * BLOCKSIZE,
    // expect an exception when creating files under it.
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
try {
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
REPLICATION, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOff() throws Exception {
short replication = 1;
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(parent, StorageType.SSD, 3 * BLOCKSIZE);
// Create file of size 2.5 * BLOCKSIZE under child directory
    // Verify the parent quota applies.
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
replication, seed);
INode fnode = fsdir.getINode4Write(parent.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
long currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, currentSSDConsumed);
// Create the 2nd file of size BLOCKSIZE under child directory and expect quota exceeded exception
Path createdFile2 = new Path(child, "created_file2.data");
long file2Len = BLOCKSIZE;
try {
DFSTestUtil.createFile(dfs, createdFile2, bufLen, file2Len, BLOCKSIZE, replication, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
currentSSDConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, currentSSDConsumed);
}
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeParentOnChildOn() throws Exception {
final Path parent = new Path(dir, "parent");
final Path child = new Path(parent, "child");
dfs.mkdirs(parent);
dfs.mkdirs(child);
dfs.setStoragePolicy(parent, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(parent, StorageType.SSD, 2 * BLOCKSIZE);
dfs.setQuotaByStorageType(child, StorageType.SSD, 3 * BLOCKSIZE);
    // Create file of size 2.5 * BLOCKSIZE under the child directory.
    // Verify that the stricter parent quota (2 * BLOCKSIZE) applies.
Path createdFile1 = new Path(child, "created_file1.data");
long file1Len = BLOCKSIZE * 2 + BLOCKSIZE / 2;
int bufLen = BLOCKSIZE / 16;
try {
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE,
REPLICATION, seed);
fail("Should have failed with QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
}
}
/**
* Both traditional space quota and the storage type quota for SSD are set and
* not exceeded.
*/
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithTraditionalQuota() throws Exception {
final Path foo = new Path(dir, "foo");
dfs.mkdirs(foo);
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 10);
dfs.setQuota(foo, Long.MAX_VALUE - 1, REPLICATION * BLOCKSIZE * 10);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
Path createdFile = new Path(foo, "created_file.data");
long fileLen = BLOCKSIZE * 2 + BLOCKSIZE / 2;
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
fileLen, BLOCKSIZE, REPLICATION, seed);
QuotaCounts cnt = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed();
assertEquals(2, cnt.getNameSpace());
assertEquals(fileLen * REPLICATION, cnt.getStorageSpace());
dfs.delete(createdFile, true);
QuotaCounts cntAfterDelete = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed();
assertEquals(1, cntAfterDelete.getNameSpace());
assertEquals(0, cntAfterDelete.getStorageSpace());
// Validate the computeQuotaUsage()
QuotaCounts counts = fnode.computeQuotaUsage(
fsn.getBlockManager().getStoragePolicySuite(), true);
assertEquals(fnode.dumpTreeRecursively().toString(), 1,
counts.getNameSpace());
assertEquals(fnode.dumpTreeRecursively().toString(), 0,
counts.getStorageSpace());
}
  /**
   * Both the traditional space quota and the storage type quota for SSD are
   * set and exceeded. Expect DSQuotaExceededException to be thrown, since the
   * traditional space quota is checked before the storage type quota.
   */
@Test(timeout = 60000)
public void testQuotaByStorageTypeAndTraditionalQuotaException1()
throws Exception {
testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
4 * REPLICATION, 4, 5, REPLICATION);
}
/**
* Both traditional space quota and the storage type quota for SSD are set and
* SSD quota is exceeded but traditional space quota is not exceeded.
*/
@Test(timeout = 60000)
public void testQuotaByStorageTypeAndTraditionalQuotaException2()
throws Exception {
testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
5 * REPLICATION, 4, 5, REPLICATION);
}
/**
* Both traditional space quota and the storage type quota for SSD are set and
* traditional space quota is exceeded but SSD quota is not exceeded.
*/
@Test(timeout = 60000)
public void testQuotaByStorageTypeAndTraditionalQuotaException3()
throws Exception {
testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
4 * REPLICATION, 5, 5, REPLICATION);
}
private void testQuotaByStorageTypeOrTraditionalQuotaExceededCase(
long storageSpaceQuotaInBlocks, long ssdQuotaInBlocks,
long testFileLenInBlocks, short replication) throws Exception {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path testDir = new Path(dir, METHOD_NAME);
dfs.mkdirs(testDir);
dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
final long ssdQuota = BLOCKSIZE * ssdQuotaInBlocks;
final long storageSpaceQuota = BLOCKSIZE * storageSpaceQuotaInBlocks;
dfs.setQuota(testDir, Long.MAX_VALUE - 1, storageSpaceQuota);
dfs.setQuotaByStorageType(testDir, StorageType.SSD, ssdQuota);
INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
Path createdFile = new Path(testDir, "created_file.data");
long fileLen = testFileLenInBlocks * BLOCKSIZE;
try {
DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
fileLen, BLOCKSIZE, replication, seed);
fail("Should have failed with DSQuotaExceededException or " +
"QuotaByStorageTypeExceededException ");
} catch (Throwable t) {
LOG.info("Got expected exception ", t);
long currentSSDConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(Math.min(ssdQuota, storageSpaceQuota/replication),
currentSSDConsumed);
}
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithSnapshot() throws Exception {
final Path sub1 = new Path(dir, "Sub1");
dfs.mkdirs(sub1);
    // Set up the ONE_SSD policy and an SSD quota of 4 * BLOCKSIZE on sub1
dfs.setStoragePolicy(sub1, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
dfs.setQuotaByStorageType(sub1, StorageType.SSD, 4 * BLOCKSIZE);
INode sub1Node = fsdir.getINode4Write(sub1.toString());
assertTrue(sub1Node.isDirectory());
assertTrue(sub1Node.isQuotaSet());
// Create file1 of size 2 * BLOCKSIZE under sub1
Path file1 = new Path(sub1, "file1");
long file1Len = 2 * BLOCKSIZE;
DFSTestUtil.createFile(dfs, file1, file1Len, REPLICATION, seed);
// Create snapshot on sub1 named s1
SnapshotTestHelper.createSnapshot(dfs, sub1, "s1");
// Verify sub1 SSD usage is unchanged after creating snapshot s1
long ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// Delete file1
dfs.delete(file1, false);
// Verify sub1 SSD usage is unchanged due to the existence of snapshot s1
ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
QuotaCounts counts1 = sub1Node.computeQuotaUsage(
fsn.getBlockManager().getStoragePolicySuite(), true);
assertEquals(sub1Node.dumpTreeRecursively().toString(), file1Len,
counts1.getTypeSpaces().get(StorageType.SSD));
ContentSummary cs1 = dfs.getContentSummary(sub1);
assertEquals(cs1.getSpaceConsumed(), file1Len * REPLICATION);
assertEquals(cs1.getTypeConsumed(StorageType.SSD), file1Len);
assertEquals(cs1.getTypeConsumed(StorageType.DISK), file1Len * 2);
// Delete the snapshot s1
dfs.deleteSnapshot(sub1, "s1");
// Verify sub1 SSD usage is fully reclaimed and changed to 0
ssdConsumed = sub1Node.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(0, ssdConsumed);
QuotaCounts counts2 = sub1Node.computeQuotaUsage(
fsn.getBlockManager().getStoragePolicySuite(), true);
assertEquals(sub1Node.dumpTreeRecursively().toString(), 0,
counts2.getTypeSpaces().get(StorageType.SSD));
ContentSummary cs2 = dfs.getContentSummary(sub1);
assertEquals(cs2.getSpaceConsumed(), 0);
assertEquals(cs2.getTypeConsumed(StorageType.SSD), 0);
assertEquals(cs2.getTypeConsumed(StorageType.DISK), 0);
}
@Test(timeout = 60000)
public void testQuotaByStorageTypeWithFileCreateTruncate() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
// set storage policy on directory "foo" to ONESSD
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on directory "foo"
dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE * 4);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(fnode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify SSD consumed before truncate
long ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// Truncate file to 1 * BLOCKSIZE
int newFile1Len = BLOCKSIZE;
dfs.truncate(createdFile1, newFile1Len);
// Verify SSD consumed after truncate
ssdConsumed = fnode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(newFile1Len, ssdConsumed);
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), newFile1Len * REPLICATION);
assertEquals(cs.getTypeConsumed(StorageType.SSD), newFile1Len);
assertEquals(cs.getTypeConsumed(StorageType.DISK), newFile1Len * 2);
}
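  /**
   * An illustrative sketch (not part of the original suite): resetting a
   * storage type quota with HdfsConstants.QUOTA_RESET should lift the limit,
   * allowing a write that the previous quota would have rejected.
   */
  @Test(timeout = 60000)
  public void testQuotaByStorageTypeReset() throws Exception {
    final Path foo = new Path(dir, "foo");
    dfs.mkdirs(foo);
    dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
    // An SSD quota of a single block would reject a 2-block file.
    dfs.setQuotaByStorageType(foo, StorageType.SSD, BLOCKSIZE);
    // Lift the SSD quota again; the same write should now succeed.
    dfs.setQuotaByStorageType(foo, StorageType.SSD, HdfsConstants.QUOTA_RESET);
    Path createdFile = new Path(foo, "created_file.data");
    DFSTestUtil.createFile(dfs, createdFile, BLOCKSIZE / 16,
        BLOCKSIZE * 2, BLOCKSIZE, REPLICATION, seed);
  }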
@Test
public void testQuotaByStorageTypePersistenceInEditLog() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path testDir = new Path(dir, METHOD_NAME);
Path createdFile1 = new Path(testDir, "created_file1.data");
dfs.mkdirs(testDir);
// set storage policy on testDir to ONESSD
dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on testDir
final long SSD_QUOTA = BLOCKSIZE * 4;
dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under testDir
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify SSD consumed before namenode restart
long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// Restart namenode to make sure the editlog is correct
cluster.restartNameNode(true);
INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
    // Verify quota is still set on the inode fetched after the restart
    assertTrue(testDirNodeAfterNNRestart.isDirectory());
    assertTrue(testDirNodeAfterNNRestart.isQuotaSet());
QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
for (StorageType t: StorageType.getTypesSupportingQuota()) {
if (t != StorageType.SSD) {
assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
}
}
long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
@Test
public void testQuotaByStorageTypePersistenceInFsImage() throws IOException {
final String METHOD_NAME = GenericTestUtils.getMethodName();
final Path testDir = new Path(dir, METHOD_NAME);
Path createdFile1 = new Path(testDir, "created_file1.data");
dfs.mkdirs(testDir);
// set storage policy on testDir to ONESSD
dfs.setStoragePolicy(testDir, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
// set quota by storage type on testDir
final long SSD_QUOTA = BLOCKSIZE * 4;
dfs.setQuotaByStorageType(testDir, StorageType.SSD, SSD_QUOTA);
INode testDirNode = fsdir.getINode4Write(testDir.toString());
assertTrue(testDirNode.isDirectory());
assertTrue(testDirNode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under testDir
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify SSD consumed before namenode restart
long ssdConsumed = testDirNode.asDirectory().getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumed);
// Restart the namenode with checkpoint to make sure fsImage is correct
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
dfs.saveNamespace();
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(true);
INode testDirNodeAfterNNRestart = fsdir.getINode4Write(testDir.toString());
    assertTrue(testDirNodeAfterNNRestart.isDirectory());
    assertTrue(testDirNodeAfterNNRestart.isQuotaSet());
QuotaCounts qc = testDirNodeAfterNNRestart.getQuotaCounts();
assertEquals(SSD_QUOTA, qc.getTypeSpace(StorageType.SSD));
for (StorageType t: StorageType.getTypesSupportingQuota()) {
if (t != StorageType.SSD) {
assertEquals(HdfsConstants.QUOTA_RESET, qc.getTypeSpace(t));
}
}
long ssdConsumedAfterNNRestart = testDirNodeAfterNNRestart.asDirectory()
.getDirectoryWithQuotaFeature()
.getSpaceConsumed().getTypeSpaces().get(StorageType.SSD);
assertEquals(file1Len, ssdConsumedAfterNNRestart);
}
@Test(timeout = 60000)
public void testContentSummaryWithoutQuotaByStorageType() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
// set storage policy on directory "foo" to ONESSD
dfs.setStoragePolicy(foo, HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(!fnode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify getContentSummary without any quota set
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
assertEquals(cs.getTypeConsumed(StorageType.SSD), file1Len);
assertEquals(cs.getTypeConsumed(StorageType.DISK), file1Len * 2);
}
@Test(timeout = 60000)
public void testContentSummaryWithoutStoragePolicy() throws Exception {
final Path foo = new Path(dir, "foo");
Path createdFile1 = new Path(foo, "created_file1.data");
dfs.mkdirs(foo);
INode fnode = fsdir.getINode4Write(foo.toString());
assertTrue(fnode.isDirectory());
assertTrue(!fnode.isQuotaSet());
// Create file of size 2 * BLOCKSIZE under directory "foo"
long file1Len = BLOCKSIZE * 2;
int bufLen = BLOCKSIZE / 16;
DFSTestUtil.createFile(dfs, createdFile1, bufLen, file1Len, BLOCKSIZE, REPLICATION, seed);
// Verify getContentSummary without any quota set
// Expect no type quota and usage information available
ContentSummary cs = dfs.getContentSummary(foo);
assertEquals(cs.getSpaceConsumed(), file1Len * REPLICATION);
for (StorageType t : StorageType.values()) {
assertEquals(cs.getTypeConsumed(t), 0);
assertEquals(cs.getTypeQuota(t), -1);
}
}
}
| 31,903
| 39.642038
| 102
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestProcessCorruptBlocks.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
public class TestProcessCorruptBlocks {
/**
* The corrupt block has to be removed when the number of valid replicas
 * matches the replication factor for the file. In this test, the above
 * condition is achieved by reducing the replication factor.
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt one replica of a block of the file
* Verify that there are still 2 good replicas and 1 corrupt replica
* (corrupt replica should not be removed since number of good
* replicas (2) is less than replication factor (3))
* Set the replication factor to 2
 *      Verify that the corrupt replica is removed.
 *        (corrupt replica should be removed since the number of good
 *         replicas (2) is equal to the replication factor (2))
*/
@Test
public void testWhenDecreasingReplication() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 2);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
/**
* The corrupt block has to be removed when the number of valid replicas
* matches replication factor for the file. In this test, the above
* condition is achieved by increasing the number of good replicas by
* replicating on a new Datanode.
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt one replica of a block of the file
* Verify that there are still 2 good replicas and 1 corrupt replica
* (corrupt replica should not be removed since number of good replicas
* (2) is less than replication factor (3))
* Start a new data node
 *     Verify that a new replica is created and the corrupt replica is
 *     removed.
*
*/
@Test
public void testByAddingAnExtraDataNode() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
DataNodeProperties dnPropsFourth = cluster.stopDataNode(3);
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
assertEquals(2, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
cluster.restartDataNode(dnPropsFourth);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
assertEquals(3, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
/**
* The corrupt block has to be removed when the number of valid replicas
* matches replication factor for the file. The above condition should hold
* true as long as there is one good replica. This test verifies that.
*
* The test strategy :
* Bring up Cluster with 2 DataNodes
* Create a file of replication factor 2
* Corrupt one replica of a block of the file
 *     Verify that there is 1 good replica and 1 corrupt replica
* (corrupt replica should not be removed since number of good
* replicas (1) is less than replication factor (2)).
* Set the replication factor to 1
* Verify that the corrupt replica is removed.
* (corrupt replica should be removed since number of good
* replicas (1) is equal to replication factor (1))
*/
@Test(timeout=20000)
public void testWithReplicationFactorAsOne() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 2, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 2);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
DFSTestUtil.waitReplication(fs, fileName, (short) 1);
assertEquals(1, countReplicas(namesystem, block).liveReplicas());
assertEquals(1, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 1);
      // Poll for up to 10 seconds until the corrupt replica is removed.
for (int i = 0; i < 10; i++) {
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {
}
if (countReplicas(namesystem, block).corruptReplicas() == 0) {
break;
}
}
assertEquals(1, countReplicas(namesystem, block).liveReplicas());
assertEquals(0, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
/**
* None of the blocks can be removed if all blocks are corrupt.
*
* The test strategy :
* Bring up Cluster with 3 DataNodes
* Create a file of replication factor 3
* Corrupt all three replicas
* Verify that all replicas are corrupt and 3 replicas are present.
* Set the replication factor to 1
* Verify that all replicas are corrupt and 3 replicas are present.
*/
@Test
public void testWithAllCorruptReplicas() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
FileSystem fs = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
try {
final Path fileName = new Path("/foo1");
DFSTestUtil.createFile(fs, fileName, 2, (short) 3, 0L);
DFSTestUtil.waitReplication(fs, fileName, (short) 3);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, fileName);
corruptBlock(cluster, fs, fileName, 0, block);
corruptBlock(cluster, fs, fileName, 1, block);
corruptBlock(cluster, fs, fileName, 2, block);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(0, countReplicas(namesystem, block).liveReplicas());
assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
namesystem.setReplication(fileName.toString(), (short) 1);
// wait for 3 seconds so that all block reports are processed.
try {
Thread.sleep(3000);
} catch (InterruptedException ignored) {
}
assertEquals(0, countReplicas(namesystem, block).liveReplicas());
assertEquals(3, countReplicas(namesystem, block).corruptReplicas());
} finally {
cluster.shutdown();
}
}
private static NumberReplicas countReplicas(final FSNamesystem namesystem, ExtendedBlock block) {
final BlockManager blockManager = namesystem.getBlockManager();
return blockManager.countNodes(blockManager.getStoredBlock(
block.getLocalBlock()));
}
private void corruptBlock(MiniDFSCluster cluster, FileSystem fs, final Path fileName,
int dnIndex, ExtendedBlock block) throws IOException {
// corrupt the block on datanode dnIndex
    // the replica index may change once the nodes are restarted,
    // but the data directory will not change
assertTrue(cluster.corruptReplica(dnIndex, block));
// Run directory scanner to update the DN's volume map
DataNodeTestUtils.runDirectoryScanner(cluster.getDataNodes().get(0));
DataNodeProperties dnProps = cluster.stopDataNode(0);
// Each datanode has multiple data dirs, check each
for (int dirIndex = 0; dirIndex < 2; dirIndex++) {
final String bpid = cluster.getNamesystem().getBlockPoolId();
File storageDir = cluster.getStorageDir(dnIndex, dirIndex);
File dataDir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
File scanLogFile = new File(dataDir, "dncp_block_verification.log.curr");
if (scanLogFile.exists()) {
        // wait up to one minute for the deletion to succeed
for (int i = 0; !scanLogFile.delete(); i++) {
assertTrue("Could not delete log file in one minute", i < 60);
try {
Thread.sleep(1000);
} catch (InterruptedException ignored) {
}
}
}
}
    // restart the datanode so the corrupt replica will be detected
cluster.restartDataNode(dnProps);
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java | java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.apache.hadoop.ipc.ClientId;
import org.apache.hadoop.ipc.RPC.RpcKind;
import org.apache.hadoop.ipc.RetryCache.CacheEntry;
import org.apache.hadoop.ipc.RpcConstants;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.LightWeightCache;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Tests for ensuring the namenode retry cache works correctly for
* non-idempotent requests.
*
 * The retry cache tracks previously received requests by the ClientId and
 * CallId carried in RPC requests, and stores the response. The stored
 * response is replayed when the same request is received again on a retry.
*
 * The test works by manipulating the {@link Server}'s notion of the current
 * RPC call. For testing retried requests, an RPC callId is generated only once
 * using {@link #newCall()} and reused for many method calls. For testing a
 * non-retried request, a new callId is generated using {@link #newCall()}.
*/
public class TestNamenodeRetryCache {
private static final byte[] CLIENT_ID = ClientId.getClientId();
private static MiniDFSCluster cluster;
private static NamenodeProtocols nnRpc;
private static final FsPermission perm = FsPermission.getDefault();
private static DistributedFileSystem filesystem;
private static int callId = 100;
private static Configuration conf;
private static final int BlockSize = 512;
/** Start a cluster */
@Before
public void setup() throws Exception {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BlockSize);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
nnRpc = cluster.getNameNode().getRpcServer();
filesystem = cluster.getFileSystem();
}
  /** Cleanup after the test
   * @throws IOException */
@After
public void cleanup() throws IOException {
cluster.shutdown();
}
/** Set the current Server RPC call */
public static void newCall() {
Server.Call call = new Server.Call(++callId, 1, null, null,
RpcKind.RPC_PROTOCOL_BUFFER, CLIENT_ID);
Server.getCurCall().set(call);
}
public static void resetCall() {
Server.Call call = new Server.Call(RpcConstants.INVALID_CALL_ID, 1, null,
null, RpcKind.RPC_PROTOCOL_BUFFER, RpcConstants.DUMMY_CLIENT_ID);
Server.getCurCall().set(call);
}
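  // Usage sketch for the two helpers above (illustrative only; it mirrors the
  // tests that follow):
  //
  //   newCall();                // pin a fresh (clientId, callId) on this thread
  //   nnRpc.delete(dir, false); // first call executes and caches the response
  //   nnRpc.delete(dir, false); // same (clientId, callId) => cached response replayed
  //   newCall();                // new callId => treated as a brand-new request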
private void concatSetup(String file1, String file2) throws Exception {
DFSTestUtil.createFile(filesystem, new Path(file1), BlockSize, (short)1, 0L);
DFSTestUtil.createFile(filesystem, new Path(file2), BlockSize, (short)1, 0L);
}
/**
* Tests for concat call
*/
@Test
public void testConcat() throws Exception {
resetCall();
String file1 = "/testNamenodeRetryCache/testConcat/file1";
String file2 = "/testNamenodeRetryCache/testConcat/file2";
// Two retried concat calls succeed
concatSetup(file1, file2);
newCall();
nnRpc.concat(file1, new String[]{file2});
nnRpc.concat(file1, new String[]{file2});
nnRpc.concat(file1, new String[]{file2});
// A non-retried concat request fails
newCall();
try {
      // This call is treated as new and should fail with an exception
nnRpc.concat(file1, new String[]{file2});
Assert.fail("testConcat - expected exception is not thrown");
} catch (IOException e) {
// Expected
}
}
/**
* Tests for delete call
*/
@Test
public void testDelete() throws Exception {
String dir = "/testNamenodeRetryCache/testDelete";
    // Create the directory, then make retried delete calls on it
newCall();
nnRpc.mkdirs(dir, perm, true);
newCall();
Assert.assertTrue(nnRpc.delete(dir, false));
Assert.assertTrue(nnRpc.delete(dir, false));
Assert.assertTrue(nnRpc.delete(dir, false));
// non-retried call fails and gets false as return
newCall();
Assert.assertFalse(nnRpc.delete(dir, false));
}
/**
* Test for createSymlink
*/
@Test
public void testCreateSymlink() throws Exception {
String target = "/testNamenodeRetryCache/testCreateSymlink/target";
// Two retried symlink calls succeed
newCall();
nnRpc.createSymlink(target, "/a/b", perm, true);
nnRpc.createSymlink(target, "/a/b", perm, true);
nnRpc.createSymlink(target, "/a/b", perm, true);
// non-retried call fails
newCall();
try {
      // This call is treated as new and should fail with an exception
nnRpc.createSymlink(target, "/a/b", perm, true);
Assert.fail("testCreateSymlink - expected exception is not thrown");
} catch (IOException e) {
// Expected
}
}
/**
* Test for create file
*/
@Test
public void testCreate() throws Exception {
String src = "/testNamenodeRetryCache/testCreate/file";
// Two retried calls succeed
newCall();
HdfsFileStatus status = nnRpc.create(src, perm, "holder",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 1, BlockSize, null);
    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null));
    Assert.assertEquals(status, nnRpc.create(src, perm, "holder",
        new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
        (short) 1, BlockSize, null));
// A non-retried call fails
newCall();
try {
      nnRpc.create(src, perm, "holder",
          new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
          (short) 1, BlockSize, null);
Assert.fail("testCreate - expected exception is not thrown");
} catch (IOException e) {
// expected
}
}
/**
   * Test for append
*/
@Test
public void testAppend() throws Exception {
String src = "/testNamenodeRetryCache/testAppend/src";
resetCall();
// Create a file with partial block
DFSTestUtil.createFile(filesystem, new Path(src), 128, (short)1, 0L);
// Retried append requests succeed
newCall();
LastBlockWithStatus b = nnRpc.append(src, "holder",
new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
Assert.assertEquals(b, nnRpc.append(src, "holder",
new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
Assert.assertEquals(b, nnRpc.append(src, "holder",
new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND))));
// non-retried call fails
newCall();
try {
nnRpc.append(src, "holder",
new EnumSetWritable<>(EnumSet.of(CreateFlag.APPEND)));
Assert.fail("testAppend - expected exception is not thrown");
} catch (Exception e) {
// Expected
}
}
/**
* Test for rename1
*/
@SuppressWarnings("deprecation")
@Test
public void testRename1() throws Exception {
String src = "/testNamenodeRetryCache/testRename1/src";
String target = "/testNamenodeRetryCache/testRename1/target";
resetCall();
nnRpc.mkdirs(src, perm, true);
// Retried renames succeed
newCall();
Assert.assertTrue(nnRpc.rename(src, target));
Assert.assertTrue(nnRpc.rename(src, target));
Assert.assertTrue(nnRpc.rename(src, target));
// A non-retried request fails
newCall();
Assert.assertFalse(nnRpc.rename(src, target));
}
/**
* Test for rename2
*/
@Test
public void testRename2() throws Exception {
String src = "/testNamenodeRetryCache/testRename2/src";
String target = "/testNamenodeRetryCache/testRename2/target";
resetCall();
nnRpc.mkdirs(src, perm, true);
// Retried renames succeed
newCall();
nnRpc.rename2(src, target, Rename.NONE);
nnRpc.rename2(src, target, Rename.NONE);
nnRpc.rename2(src, target, Rename.NONE);
// A non-retried request fails
newCall();
try {
nnRpc.rename2(src, target, Rename.NONE);
Assert.fail("testRename 2 expected exception is not thrown");
} catch (IOException e) {
// expected
}
}
/**
* Make sure a retry call does not hang because of the exception thrown in the
* first call.
*/
@Test(timeout = 60000)
public void testUpdatePipelineWithFailOver() throws Exception {
cluster.shutdown();
nnRpc = null;
filesystem = null;
cluster = new MiniDFSCluster.Builder(conf).nnTopology(
MiniDFSNNTopology.simpleHATopology()).numDataNodes(1).build();
cluster.waitActive();
NamenodeProtocols ns0 = cluster.getNameNodeRpc(0);
ExtendedBlock oldBlock = new ExtendedBlock();
ExtendedBlock newBlock = new ExtendedBlock();
DatanodeID[] newNodes = new DatanodeID[2];
String[] newStorages = new String[2];
newCall();
try {
ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
fail("Expect StandbyException from the updatePipeline call");
} catch (StandbyException e) {
      // expected, since both NNs are in standby state at the beginning
GenericTestUtils.assertExceptionContains(
HAServiceState.STANDBY.toString(), e);
}
cluster.transitionToActive(0);
try {
ns0.updatePipeline("testClient", oldBlock, newBlock, newNodes, newStorages);
} catch (IOException e) {
      // ignored; the retried call just should not hang.
}
}
/**
   * Tests for createSnapshot, renameSnapshot, and deleteSnapshot
*/
@Test
public void testSnapshotMethods() throws Exception {
String dir = "/testNamenodeRetryCache/testCreateSnapshot/src";
resetCall();
nnRpc.mkdirs(dir, perm, true);
nnRpc.allowSnapshot(dir);
// Test retry of create snapshot
newCall();
String name = nnRpc.createSnapshot(dir, "snap1");
Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
Assert.assertEquals(name, nnRpc.createSnapshot(dir, "snap1"));
// Non retried calls should fail
newCall();
try {
nnRpc.createSnapshot(dir, "snap1");
Assert.fail("testSnapshotMethods expected exception is not thrown");
} catch (IOException e) {
      // expected
}
// Test retry of rename snapshot
newCall();
nnRpc.renameSnapshot(dir, "snap1", "snap2");
nnRpc.renameSnapshot(dir, "snap1", "snap2");
nnRpc.renameSnapshot(dir, "snap1", "snap2");
// Non retried calls should fail
newCall();
try {
nnRpc.renameSnapshot(dir, "snap1", "snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
} catch (IOException e) {
// expected
}
// Test retry of delete snapshot
newCall();
nnRpc.deleteSnapshot(dir, "snap2");
nnRpc.deleteSnapshot(dir, "snap2");
nnRpc.deleteSnapshot(dir, "snap2");
// Non retried calls should fail
newCall();
try {
nnRpc.deleteSnapshot(dir, "snap2");
Assert.fail("testSnapshotMethods expected exception is not thrown");
} catch (IOException e) {
// expected
}
}
@Test
public void testRetryCacheConfig() {
// By default retry configuration should be enabled
Configuration conf = new HdfsConfiguration();
Assert.assertNotNull(FSNamesystem.initRetryCache(conf));
// If retry cache is disabled, it should not be created
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ENABLE_RETRY_CACHE_KEY, false);
Assert.assertNull(FSNamesystem.initRetryCache(conf));
}
/**
   * After running a set of operations, restart the NN and check if the retry
   * cache has been rebuilt based on the edit log.
*/
@Test
public void testRetryCacheRebuild() throws Exception {
DFSTestUtil.runOperations(cluster, filesystem, conf, BlockSize, 0);
FSNamesystem namesystem = cluster.getNamesystem();
LightWeightCache<CacheEntry, CacheEntry> cacheSet =
(LightWeightCache<CacheEntry, CacheEntry>) namesystem.getRetryCache().getCacheSet();
assertEquals(25, cacheSet.size());
Map<CacheEntry, CacheEntry> oldEntries =
new HashMap<CacheEntry, CacheEntry>();
Iterator<CacheEntry> iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
oldEntries.put(entry, entry);
}
// restart NameNode
cluster.restartNameNode();
cluster.waitActive();
namesystem = cluster.getNamesystem();
// check retry cache
assertTrue(namesystem.hasRetryCache());
cacheSet = (LightWeightCache<CacheEntry, CacheEntry>) namesystem
.getRetryCache().getCacheSet();
assertEquals(25, cacheSet.size());
iter = cacheSet.iterator();
while (iter.hasNext()) {
CacheEntry entry = iter.next();
assertTrue(oldEntries.containsKey(entry));
}
}
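  // The rebuild above works because each non-idempotent operation recorded in
  // the edit log also carries its originating (clientId, callId), so the
  // NameNode can repopulate the retry cache while replaying edits on restart.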
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRespectsBindHostKeys.java | java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsNot.not;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.junit.Test;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import java.io.File;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
/**
* This test checks that the NameNode respects the following keys:
*
* - DFS_NAMENODE_RPC_BIND_HOST_KEY
* - DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY
* - DFS_NAMENODE_HTTP_BIND_HOST_KEY
* - DFS_NAMENODE_HTTPS_BIND_HOST_KEY
*/
public class TestNameNodeRespectsBindHostKeys {
public static final Log LOG = LogFactory.getLog(TestNameNodeRespectsBindHostKeys.class);
private static final String WILDCARD_ADDRESS = "0.0.0.0";
private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
private static String getRpcServerAddress(MiniDFSCluster cluster) {
NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
return rpcServer.getClientRpcServer().getListenerAddress().getAddress().toString();
}
private static String getServiceRpcServerAddress(MiniDFSCluster cluster) {
NameNodeRpcServer rpcServer = (NameNodeRpcServer) cluster.getNameNodeRpc();
return rpcServer.getServiceRpcServer().getListenerAddress().getAddress().toString();
}
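  // Note on semantics (a summary, not asserted by this test directly): the
  // *_BIND_HOST keys only override the local interface the server binds to,
  // while the matching *_ADDRESS keys still supply the advertised host:port.
  // Sketch with hypothetical values:
  //
  //   conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn.example.com:8020"); // what clients dial
  //   conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");           // listen on all interfaces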
@Test (timeout=300000)
public void testRpcBindHostKey() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
// NN should not bind the wildcard address by default.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getRpcServerAddress(cluster);
assertThat("Bind address not expected to be wildcard by default.",
address, not("/" + WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);
// Tell NN to bind the wildcard address.
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
// Verify that NN binds wildcard address now.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",
address, is("/" + WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test (timeout=300000)
public void testServiceRpcBindHostKey() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
// NN should not bind the wildcard address by default.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getServiceRpcServerAddress(cluster);
assertThat("Bind address not expected to be wildcard by default.",
address, not("/" + WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);
// Tell NN to bind the wildcard address.
conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);
// Verify that NN binds wildcard address now.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getServiceRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",
address, is("/" + WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test(timeout=300000)
public void testHttpBindHostKey() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
LOG.info("Testing without " + DFS_NAMENODE_HTTP_BIND_HOST_KEY);
// NN should not bind the wildcard address by default.
try {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpAddress().toString();
assertFalse("HTTP Bind address not expected to be wildcard by default.",
address.startsWith(WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
LOG.info("Testing with " + DFS_NAMENODE_HTTP_BIND_HOST_KEY);
// Tell NN to bind the wildcard address.
conf.set(DFS_NAMENODE_HTTP_BIND_HOST_KEY, WILDCARD_ADDRESS);
// Verify that NN binds wildcard address now.
try {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpAddress().toString();
assertTrue("HTTP Bind address " + address + " is not wildcard.",
address.startsWith(WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private static final String BASEDIR = System.getProperty("test.build.dir",
"target/test-dir") + "/" + TestNameNodeRespectsBindHostKeys.class.getSimpleName();
private static void setupSsl() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(HdfsClientConfigKeys.DFS_WEBHDFS_ENABLED_KEY, true);
conf.set(DFSConfigKeys.DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, "localhost:0");
conf.set(DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY, "localhost:0");
File base = new File(BASEDIR);
FileUtil.fullyDelete(base);
assertTrue(base.mkdirs());
final String keystoresDir = new File(BASEDIR).getAbsolutePath();
final String sslConfDir = KeyStoreTestUtil.getClasspathDir(TestNameNodeRespectsBindHostKeys.class);
KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
}
/**
   * HTTPS test is different since we need to set up the SSL configuration.
* NN also binds the wildcard address for HTTPS port by default so we must
* pick a different host/port combination.
* @throws Exception
*/
@Test (timeout=300000)
public void testHttpsBindHostKey() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
LOG.info("Testing behavior without " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
setupSsl();
conf.set(DFS_HTTP_POLICY_KEY, HttpConfig.Policy.HTTPS_ONLY.name());
// NN should not bind the wildcard address by default.
try {
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpsAddress().toString();
assertFalse("HTTP Bind address not expected to be wildcard by default.",
address.startsWith(WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
LOG.info("Testing behavior with " + DFS_NAMENODE_HTTPS_BIND_HOST_KEY);
// Tell NN to bind the wildcard address.
conf.set(DFS_NAMENODE_HTTPS_BIND_HOST_KEY, WILDCARD_ADDRESS);
// Verify that NN binds wildcard address now.
try {
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpsAddress().toString();
assertTrue("HTTP Bind address " + address + " is not wildcard.",
address.startsWith(WILDCARD_ADDRESS));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java | java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.util.Canceler;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Test FSImage save/load when Snapshot is supported
*/
public class TestFSImageWithSnapshot {
{
SnapshotTestHelper.disableLogs();
((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
}
static final long seed = 0;
static final short REPLICATION = 3;
static final int BLOCKSIZE = 1024;
static final long txid = 1;
private final Path dir = new Path("/TestSnapshot");
private static final String testDir =
System.getProperty("test.build.data", "build/test/data");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Create a temp fsimage file for testing.
* @param dir The directory where the fsimage file resides
* @param imageTxId The transaction id of the fsimage
   * @return the image file
*/
private File getImageFile(String dir, long imageTxId) {
return new File(dir, String.format("%s_%019d", NameNodeFile.IMAGE,
imageTxId));
}
/**
* Create a temp file for dumping the fsdir
* @param dir directory for the temp file
   * @param suffix suffix of the temp file
* @return the temp file
*/
private File getDumpTreeFile(String dir, String suffix) {
return new File(dir, String.format("dumpTree_%s", suffix));
}
/**
* Dump the fsdir tree to a temp file
* @param fileSuffix suffix of the temp file for dumping
* @return the temp file
*/
private File dumpTree2File(String fileSuffix) throws IOException {
File file = getDumpTreeFile(testDir, fileSuffix);
SnapshotTestHelper.dumpTree2File(fsn.getFSDirectory(), file);
return file;
}
/** Append a file without closing the output stream */
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
throws IOException {
byte[] toAppend = new byte[length];
Random random = new Random();
random.nextBytes(toAppend);
HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
out.write(toAppend);
return out;
}
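  // Callers below pair this helper with out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)),
  // which persists the new length on the NameNode while the stream is still open,
  // so the appended-but-unclosed data is visible to snapshots and saved images.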
/** Save the fsimage to a temp file */
private File saveFSImageToTempFile() throws IOException {
SaveNamespaceContext context = new SaveNamespaceContext(fsn, txid,
new Canceler());
FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
FSImageCompression compression = FSImageCompression.createCompression(conf);
File imageFile = getImageFile(testDir, txid);
fsn.readLock();
try {
saver.save(imageFile, compression);
} finally {
fsn.readUnlock();
}
return imageFile;
}
/** Load the fsimage from a temp file */
private void loadFSImageFromTempFile(File imageFile) throws IOException {
FSImageFormat.LoaderDelegator loader = FSImageFormat.newLoader(conf, fsn);
fsn.writeLock();
fsn.getFSDirectory().writeLock();
try {
loader.load(imageFile, false);
FSImage.updateCountForQuota(fsn.getBlockManager().getStoragePolicySuite(),
INodeDirectory.valueOf(fsn.getFSDirectory().getINode("/"), "/"));
} finally {
fsn.getFSDirectory().writeUnlock();
fsn.writeUnlock();
}
}
/**
   * Test when a snapshot is taken on the root directory
*/
@Test
public void testSnapshotOnRoot() throws Exception {
final Path root = new Path("/");
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root, "s1");
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
INodeDirectory rootNode = fsn.dir.getINode4Write(root.toString())
.asDirectory();
assertTrue("The children list of root should be empty",
rootNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
// one snapshot on root: s1
List<DirectoryDiff> diffList = rootNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
// check SnapshotManager's snapshottable directory list
assertEquals(1, fsn.getSnapshotManager().getNumSnapshottableDirs());
SnapshottableDirectoryStatus[] sdirs = fsn.getSnapshotManager()
.getSnapshottableDirListing(null);
assertEquals(root, sdirs[0].getFullPath());
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
/**
* Testing steps:
* <pre>
* 1. Creating/modifying directories/files while snapshots are being taken.
* 2. Dump the FSDirectory tree of the namesystem.
* 3. Save the namesystem to a temp file (FSImage saving).
* 4. Restart the cluster and format the namesystem.
* 5. Load the namesystem from the temp file (FSImage loading).
* 6. Dump the FSDirectory again and compare the two dumped string.
* </pre>
*/
@Test
public void testSaveLoadImage() throws Exception {
int s = 0;
// make changes to the namesystem
hdfs.mkdirs(dir);
SnapshotTestHelper.createSnapshot(hdfs, dir, "s" + ++s);
Path sub1 = new Path(dir, "sub1");
hdfs.mkdirs(sub1);
hdfs.setPermission(sub1, new FsPermission((short)0777));
Path sub11 = new Path(sub1, "sub11");
hdfs.mkdirs(sub11);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub2 = new Path(dir, "sub2");
Path sub2file1 = new Path(sub2, "sub2file1");
Path sub2file2 = new Path(sub2, "sub2file2");
DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, sub2file2, BLOCKSIZE, REPLICATION, seed);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
hdfs.setReplication(sub1file1, (short) (REPLICATION - 1));
hdfs.delete(sub1file2, true);
hdfs.setOwner(sub2, "dr.who", "unknown");
hdfs.delete(sub2file1, true);
checkImage(s);
hdfs.createSnapshot(dir, "s" + ++s);
Path sub1_sub2file2 = new Path(sub1, "sub2file2");
hdfs.rename(sub2file2, sub1_sub2file2);
hdfs.rename(sub1file1, sub2file1);
checkImage(s);
hdfs.rename(sub2file1, sub2file2);
checkImage(s);
}
void checkImage(int s) throws IOException {
final String name = "s" + s;
// dump the fsdir tree
File fsnBefore = dumpTree2File(name + "_before");
// save the namesystem to a temp file
File imageFile = saveFSImageToTempFile();
long numSdirBefore = fsn.getNumSnapshottableDirs();
long numSnapshotBefore = fsn.getNumSnapshots();
SnapshottableDirectoryStatus[] dirBefore = hdfs.getSnapshottableDirListing();
// shutdown the cluster
cluster.shutdown();
// dump the fsdir tree
File fsnBetween = dumpTree2File(name + "_between");
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnBetween, true);
    // restart the cluster and format the namesystem
cluster = new MiniDFSCluster.Builder(conf).format(true)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// load the namesystem from the temp file
loadFSImageFromTempFile(imageFile);
// dump the fsdir tree again
File fsnAfter = dumpTree2File(name + "_after");
    // compare the two dumped trees
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
long numSdirAfter = fsn.getNumSnapshottableDirs();
long numSnapshotAfter = fsn.getNumSnapshots();
SnapshottableDirectoryStatus[] dirAfter = hdfs.getSnapshottableDirListing();
Assert.assertEquals(numSdirBefore, numSdirAfter);
Assert.assertEquals(numSnapshotBefore, numSnapshotAfter);
Assert.assertEquals(dirBefore.length, dirAfter.length);
List<String> pathListBefore = new ArrayList<String>();
for (SnapshottableDirectoryStatus sBefore : dirBefore) {
pathListBefore.add(sBefore.getFullPath().toString());
}
for (SnapshottableDirectoryStatus sAfter : dirAfter) {
Assert.assertTrue(pathListBefore.contains(sAfter.getFullPath().toString()));
}
}
/**
* Test the fsimage saving/loading while file appending.
*/
@Test (timeout=60000)
public void testSaveLoadImageWithAppending() throws Exception {
Path sub1 = new Path(dir, "sub1");
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
// 1. create snapshot s0
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s0");
// 2. create snapshot s1 before appending sub1file1 finishes
HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// also append sub1file2
DFSTestUtil.appendFile(hdfs, sub1file2, BLOCKSIZE);
hdfs.createSnapshot(dir, "s1");
out.close();
// 3. create snapshot s2 before appending finishes
out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
hdfs.createSnapshot(dir, "s2");
out.close();
// 4. save fsimage before appending finishes
out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// dump fsdir
File fsnBefore = dumpTree2File("before");
// save the namesystem to a temp file
File imageFile = saveFSImageToTempFile();
// 5. load fsimage and compare
    // first restart and format the cluster
out.close();
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(true)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// then load the fsimage
loadFSImageFromTempFile(imageFile);
// dump the fsdir tree again
File fsnAfter = dumpTree2File("after");
    // compare the two dumped trees
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
/**
* Test the fsimage loading while there is file under construction.
*/
@Test (timeout=60000)
public void testLoadImageWithAppending() throws Exception {
Path sub1 = new Path(dir, "sub1");
Path sub1file1 = new Path(sub1, "sub1file1");
Path sub1file2 = new Path(sub1, "sub1file2");
DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s0");
HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
/**
* Test fsimage loading when 1) there is an empty file loaded from fsimage,
* and 2) there is later an append operation to be applied from edit log.
*/
@Test (timeout=60000)
public void testLoadImageWithEmptyFile() throws Exception {
// create an empty file
Path file = new Path(dir, "file");
FSDataOutputStream out = hdfs.create(file);
out.close();
// save namespace
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
// append to the empty file
out = hdfs.append(file);
out.write(1);
out.close();
// restart cluster
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
FileStatus status = hdfs.getFileStatus(file);
assertEquals(1, status.getLen());
}
/**
* Testing a special case with snapshots. When the following steps happen:
* <pre>
* 1. Take snapshot s1 on dir.
* 2. Create new dir and files under subsubDir, which is descendant of dir.
* 3. Take snapshot s2 on dir.
* 4. Delete subsubDir.
* 5. Delete snapshot s2.
* </pre>
   * When we merge the diff from s2 to s1 (since we deleted s2), we need to
   * make sure all the files/dirs created after s1 are destroyed. Otherwise
   * we may save these files/dirs to the fsimage, and cause a
   * FileNotFoundException while loading the fsimage.
*/
@Test (timeout=300000)
public void testSaveLoadImageAfterSnapshotDeletion()
throws Exception {
// create initial dir and subdir
Path dir = new Path("/dir");
Path subDir = new Path(dir, "subdir");
Path subsubDir = new Path(subDir, "subsubdir");
hdfs.mkdirs(subsubDir);
// take snapshots on subdir and dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
// create new dir under initial dir
Path newDir = new Path(subsubDir, "newdir");
Path newFile = new Path(newDir, "newfile");
hdfs.mkdirs(newDir);
DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
// create another snapshot
SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
// delete subsubdir
hdfs.delete(subsubDir, true);
// delete snapshot s2
hdfs.deleteSnapshot(dir, "s2");
// restart cluster
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.format(false).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// save namespace to fsimage
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAuditLogger.java | java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.collect.Lists;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.top.TopAuditLogger;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.authorize.ProxyServers;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import java.io.IOException;
import java.net.HttpURLConnection;
import java.net.InetAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.util.List;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.NNTOP_ENABLED_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doThrow;
/**
* Tests for the {@link AuditLogger} custom audit logging interface.
*/
public class TestAuditLogger {
private static final short TEST_PERMISSION = (short) 0654;
@Before
public void setup() {
DummyAuditLogger.initialized = false;
DummyAuditLogger.logCount = 0;
DummyAuditLogger.remoteAddr = null;
Configuration conf = new HdfsConfiguration();
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
}
/**
* Tests that AuditLogger works as expected.
*/
@Test
public void testAuditLogger() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
DummyAuditLogger.class.getName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitClusterUp();
assertTrue(DummyAuditLogger.initialized);
DummyAuditLogger.resetLogCount();
FileSystem fs = cluster.getFileSystem();
long time = System.currentTimeMillis();
fs.setTimes(new Path("/"), time, time);
assertEquals(1, DummyAuditLogger.logCount);
} finally {
cluster.shutdown();
}
}
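  // Registration is purely config-driven; the equivalent hdfs-site.xml entry
  // would look like the following (the logger class name is hypothetical):
  //
  //   <property>
  //     <name>dfs.namenode.audit.loggers</name>
  //     <value>com.example.MyAuditLogger</value>
  //   </property>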
/**
* Tests that TopAuditLogger can be disabled
*/
@Test
public void testDisableTopAuditLogger() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(NNTOP_ENABLED_KEY, false);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitClusterUp();
List<AuditLogger> auditLoggers =
cluster.getNameNode().getNamesystem().getAuditLoggers();
for (AuditLogger auditLogger : auditLoggers) {
assertFalse(
"top audit logger is still hooked in after it is disabled",
auditLogger instanceof TopAuditLogger);
}
} finally {
cluster.shutdown();
}
}
@Test
public void testWebHdfsAuditLogger() throws IOException, URISyntaxException {
Configuration conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
DummyAuditLogger.class.getName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
GetOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
try {
cluster.waitClusterUp();
assertTrue(DummyAuditLogger.initialized);
URI uri = new URI(
"http",
NetUtils.getHostPortString(cluster.getNameNode().getHttpAddress()),
"/webhdfs/v1/", op.toQueryString(), null);
// non-proxy request
HttpURLConnection conn = (HttpURLConnection) uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.connect();
assertEquals(200, conn.getResponseCode());
conn.disconnect();
assertEquals(1, DummyAuditLogger.logCount);
assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
// non-trusted proxied request
conn = (HttpURLConnection) uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
conn.connect();
assertEquals(200, conn.getResponseCode());
conn.disconnect();
assertEquals(2, DummyAuditLogger.logCount);
assertEquals("127.0.0.1", DummyAuditLogger.remoteAddr);
// trusted proxied request
conf.set(ProxyServers.CONF_HADOOP_PROXYSERVERS, "127.0.0.1");
ProxyUsers.refreshSuperUserGroupsConfiguration(conf);
conn = (HttpURLConnection) uri.toURL().openConnection();
conn.setRequestMethod(op.getType().toString());
conn.setRequestProperty("X-Forwarded-For", "1.1.1.1");
conn.connect();
assertEquals(200, conn.getResponseCode());
conn.disconnect();
assertEquals(3, DummyAuditLogger.logCount);
assertEquals("1.1.1.1", DummyAuditLogger.remoteAddr);
} finally {
cluster.shutdown();
}
}
/**
* Minor test related to HADOOP-9155. Verify that during a
* FileSystem.setPermission() operation, the stat passed in during the
* logAuditEvent() call returns the new permission rather than the old
* permission.
*/
@Test
public void testAuditLoggerWithSetPermission() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
DummyAuditLogger.class.getName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitClusterUp();
assertTrue(DummyAuditLogger.initialized);
DummyAuditLogger.resetLogCount();
FileSystem fs = cluster.getFileSystem();
long time = System.currentTimeMillis();
final Path p = new Path("/");
fs.setTimes(p, time, time);
fs.setPermission(p, new FsPermission(TEST_PERMISSION));
assertEquals(TEST_PERMISSION, DummyAuditLogger.foundPermission);
assertEquals(2, DummyAuditLogger.logCount);
} finally {
cluster.shutdown();
}
}
@Test
public void testAuditLogWithAclFailure() throws Exception {
final Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
DummyAuditLogger.class.getName());
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitClusterUp();
final FSDirectory dir = cluster.getNamesystem().getFSDirectory();
final FSDirectory mockedDir = Mockito.spy(dir);
AccessControlException ex = new AccessControlException();
doThrow(ex).when(mockedDir).getPermissionChecker();
cluster.getNamesystem().setFSDirectory(mockedDir);
assertTrue(DummyAuditLogger.initialized);
DummyAuditLogger.resetLogCount();
final FileSystem fs = cluster.getFileSystem();
final Path p = new Path("/");
final List<AclEntry> acls = Lists.newArrayList();
try {
fs.getAclStatus(p);
} catch (AccessControlException ignored) {}
try {
fs.setAcl(p, acls);
} catch (AccessControlException ignored) {}
try {
fs.removeAcl(p);
} catch (AccessControlException ignored) {}
try {
fs.removeDefaultAcl(p);
} catch (AccessControlException ignored) {}
try {
fs.removeAclEntries(p, acls);
} catch (AccessControlException ignored) {}
try {
fs.modifyAclEntries(p, acls);
} catch (AccessControlException ignored) {}
assertEquals(6, DummyAuditLogger.logCount);
assertEquals(6, DummyAuditLogger.unsuccessfulCount);
} finally {
cluster.shutdown();
}
}
/**
* Tests that a broken audit logger causes requests to fail.
*/
@Test
public void testBrokenLogger() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.set(DFS_NAMENODE_AUDIT_LOGGERS_KEY,
BrokenAuditLogger.class.getName());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
cluster.waitClusterUp();
FileSystem fs = cluster.getFileSystem();
long time = System.currentTimeMillis();
fs.setTimes(new Path("/"), time, time);
fail("Expected exception due to broken audit logger.");
} catch (RemoteException re) {
// Expected.
} finally {
cluster.shutdown();
}
}
public static class DummyAuditLogger implements AuditLogger {
static boolean initialized;
static int logCount;
static int unsuccessfulCount;
static short foundPermission;
static String remoteAddr;
public void initialize(Configuration conf) {
initialized = true;
}
public static void resetLogCount() {
logCount = 0;
unsuccessfulCount = 0;
}
public void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst,
FileStatus stat) {
remoteAddr = addr.getHostAddress();
logCount++;
if (!succeeded) {
unsuccessfulCount++;
}
if (stat != null) {
foundPermission = stat.getPermission().toShort();
}
}
}
public static class BrokenAuditLogger implements AuditLogger {
public void initialize(Configuration conf) {
// No op.
}
public void logAuditEvent(boolean succeeded, String userName,
InetAddress addr, String cmd, String src, String dst,
FileStatus stat) {
throw new RuntimeException("uh oh");
}
}
}
hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java | java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import java.io.File;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentCaptor;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Joiner;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
public class TestNNStorageRetentionManager {
final Configuration conf = new Configuration();
/**
* For the purpose of this test, purge as many edits as we can
   * with no extra "safety cushion".
*/
@Before
public void setNoExtraEditRetention() {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
}
/**
* Test the "easy case" where we have more images in the
* directory than we need to keep. Should purge the
* old ones.
*/
@Test
public void testPurgeEasyCase() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(101,200), true);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(201,300), true);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(301,400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(401), false);
// Test that other files don't get purged
tc.addLog("/foo1/current/VERSION", false);
runTest(tc);
}
/**
* Same as above, but across multiple directories
*/
@Test
public void testPurgeMultipleDirs() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addRoot("/foo2", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo2/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(101, 200), true);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(201, 300), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 300), true);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(301, 400), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
}
/**
* Test that if we have fewer fsimages than the configured
* retention, we don't purge any of them
*/
@Test
public void testPurgeLessThanRetention() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), false);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(101,200), false);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(201,300), false);
tc.addLog("/foo1/current/" + getFinalizedEditsFileName(301,400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
}
/**
* Check for edge case with no logs present at all.
*/
@Test
public void testNoLogs() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
runTest(tc);
}
/**
* Check for edge case with no logs or images present at all.
*/
@Test
public void testEmptyDir() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
runTest(tc);
}
/**
* Test that old in-progress logs are properly purged
*/
@Test
public void testOldInProgress() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE_AND_EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
tc.addLog("/foo1/current/" + getInProgressEditsFileName(101), true);
runTest(tc);
}
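  // A hedged reading of the expectation above (this test's behavior, not a
  // documented rule): an old in-progress segment is purged once its start
  // txid (101) falls behind the retention boundary (txid 301, the oldest
  // retained image plus one), even though its end txid is unknown.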
@Test
public void testSeparateEditDirs() throws IOException {
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE);
tc.addRoot("/foo2", NameNodeDirType.EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 200), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 300), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 400), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
}
@Test
public void testRetainExtraLogs() throws IOException {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,
50);
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE);
tc.addRoot("/foo2", NameNodeDirType.EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 200), true);
    // Since we need 50 extra edits (down to txid 251), *do* retain the
    // 201-300 segment
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 300), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 400), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
}
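  // Retention-floor sketch for the case above (name is illustrative only):
  // required logs start at oldest retained image txid + 1 = 301, and 50 extra
  // edits drop the floor to 251; the 201-300 segment straddles 251, so it is
  // kept, while 101-200 ends below the floor and is purged.
  private static long sketchRetentionFloor(long oldestRetainedImageTxId,
      long numExtraEditsRetained) {
    return oldestRetainedImageTxId + 1 - numExtraEditsRetained;
  }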
@Test
public void testRetainExtraLogsLimitedSegments() throws IOException {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY,
150);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_EXTRA_EDITS_SEGMENTS_RETAINED_KEY, 2);
TestCaseDescription tc = new TestCaseDescription();
tc.addRoot("/foo1", NameNodeDirType.IMAGE);
tc.addRoot("/foo2", NameNodeDirType.EDITS);
tc.addImage("/foo1/current/" + getImageFileName(100), true);
tc.addImage("/foo1/current/" + getImageFileName(200), true);
tc.addImage("/foo1/current/" + getImageFileName(300), false);
tc.addImage("/foo1/current/" + getImageFileName(400), false);
    // Segments containing txns up to txId 240 are extra and should be purged.
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(1, 100), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(101, 175), true);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(176) + ".empty",
true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(176, 200), true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(201, 225), true);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(226) + ".corrupt",
true);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(226, 240), true);
// Only retain 2 extra segments. The 301-350 and 351-400 segments are
// considered required, not extra.
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(241, 275), false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(276, 300), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(301) + ".empty",
false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(301, 350), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(351) + ".corrupt",
false);
tc.addLog("/foo2/current/" + getFinalizedEditsFileName(351, 400), false);
tc.addLog("/foo2/current/" + getInProgressEditsFileName(401), false);
runTest(tc);
}
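  // In the case above the two retention knobs interact: the 150-txn floor
  // alone would keep everything from txid 151 onward, but the 2-segment cap
  // tightens the boundary to txid 241 (the start of the oldest retained
  // extra segment), so all files behind that boundary, including the empty
  // and corrupt in-progress files, are purged.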
private void runTest(TestCaseDescription tc) throws IOException {
StoragePurger mockPurger =
Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
ArgumentCaptor<FSImageFile> imagesPurgedCaptor =
ArgumentCaptor.forClass(FSImageFile.class);
ArgumentCaptor<EditLogFile> logsPurgedCaptor =
ArgumentCaptor.forClass(EditLogFile.class);
// Ask the manager to purge files we don't need any more
new NNStorageRetentionManager(conf,
tc.mockStorage(), tc.mockEditLog(mockPurger), mockPurger)
.purgeOldStorage(NameNodeFile.IMAGE);
// Verify that it asked the purger to remove the correct files
Mockito.verify(mockPurger, Mockito.atLeast(0))
.purgeImage(imagesPurgedCaptor.capture());
Mockito.verify(mockPurger, Mockito.atLeast(0))
.purgeLog(logsPurgedCaptor.capture());
// Check images
Set<String> purgedPaths = Sets.newLinkedHashSet();
for (FSImageFile purged : imagesPurgedCaptor.getAllValues()) {
purgedPaths.add(fileToPath(purged.getFile()));
}
Assert.assertEquals(
Joiner.on(",").join(filesToPaths(tc.expectedPurgedImages)),
Joiner.on(",").join(purgedPaths));
    // Check edit logs
purgedPaths.clear();
for (EditLogFile purged : logsPurgedCaptor.getAllValues()) {
purgedPaths.add(fileToPath(purged.getFile()));
}
Assert.assertEquals(
Joiner.on(",").join(filesToPaths(tc.expectedPurgedLogs)),
Joiner.on(",").join(purgedPaths));
}
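  // Note on the verification above: Mockito.atLeast(0) never fails by
  // itself; it only drains every purgeImage/purgeLog invocation into the
  // ArgumentCaptors so the purged paths can be joined and compared, in
  // order, against the expectations recorded via addImage/addLog.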
private class TestCaseDescription {
private final Map<File, FakeRoot> dirRoots = Maps.newLinkedHashMap();
private final Set<File> expectedPurgedLogs = Sets.newLinkedHashSet();
private final Set<File> expectedPurgedImages = Sets.newLinkedHashSet();
private class FakeRoot {
final NameNodeDirType type;
final List<File> files;
FakeRoot(NameNodeDirType type) {
this.type = type;
files = Lists.newArrayList();
}
StorageDirectory mockStorageDir() {
return FSImageTestUtil.mockStorageDirectory(
type, false,
filesToPaths(files).toArray(new String[0]));
}
}
void addRoot(String root, NameNodeDirType dir) {
dirRoots.put(new File(root), new FakeRoot(dir));
}
private void addFile(File file) {
for (Map.Entry<File, FakeRoot> entry : dirRoots.entrySet()) {
if (fileToPath(file).startsWith(fileToPath(entry.getKey()))) {
entry.getValue().files.add(file);
}
}
}
void addLog(String path, boolean expectPurge) {
File file = new File(path);
addFile(file);
if (expectPurge) {
expectedPurgedLogs.add(file);
}
}
void addImage(String path, boolean expectPurge) {
File file = new File(path);
addFile(file);
if (expectPurge) {
expectedPurgedImages.add(file);
}
}
NNStorage mockStorage() throws IOException {
List<StorageDirectory> sds = Lists.newArrayList();
for (FakeRoot root : dirRoots.values()) {
sds.add(root.mockStorageDir());
}
return mockStorageForDirs(sds.toArray(new StorageDirectory[0]));
}
@SuppressWarnings("unchecked")
public FSEditLog mockEditLog(StoragePurger purger) throws IOException {
final List<JournalManager> jms = Lists.newArrayList();
final JournalSet journalSet = new JournalSet(0);
for (FakeRoot root : dirRoots.values()) {
if (!root.type.isOfType(NameNodeDirType.EDITS)) continue;
        // Pass a null NNStorage; this unit test never exercises it.
FileJournalManager fjm = new FileJournalManager(conf,
root.mockStorageDir(), null);
fjm.purger = purger;
jms.add(fjm);
journalSet.add(fjm, false);
}
FSEditLog mockLog = Mockito.mock(FSEditLog.class);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
assert args.length == 1;
long txId = (Long) args[0];
for (JournalManager jm : jms) {
jm.purgeLogsOlderThan(txId);
}
return null;
}
}).when(mockLog).purgeLogsOlderThan(Mockito.anyLong());
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
Object[] args = invocation.getArguments();
journalSet.selectInputStreams((Collection<EditLogInputStream>)args[0],
(Long)args[1], (Boolean)args[2]);
return null;
}
}).when(mockLog).selectInputStreams(Mockito.anyCollection(),
Mockito.anyLong(), Mockito.anyBoolean());
return mockLog;
}
}
/**
* Converts a file to a platform-agnostic URI path.
*
* @param file File to convert
* @return String path
*/
private static String fileToPath(File file) {
return file.toURI().getPath();
}
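  // For example, new File("C:\\foo\\bar").toURI().getPath() yields
  // "/C:/foo/bar" on Windows, while "/foo/bar" is unchanged on Unix, so the
  // tests can compare paths as plain strings on either platform.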
/**
* Converts multiple files to platform-agnostic URI paths.
*
* @param files Collection<File> files to convert
* @return Collection<String> paths
*/
private static Collection<String> filesToPaths(Collection<File> files) {
List<String> paths = Lists.newArrayList();
for (File file: files) {
paths.add(fileToPath(file));
}
return paths;
}
private static NNStorage mockStorageForDirs(final StorageDirectory ... mockDirs)
throws IOException {
NNStorage mockStorage = Mockito.mock(NNStorage.class);
Mockito.doAnswer(new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
FSImageStorageInspector inspector =
(FSImageStorageInspector) invocation.getArguments()[0];
for (StorageDirectory sd : mockDirs) {
inspector.inspectDirectory(sd);
}
return null;
}
}).when(mockStorage).inspectStorageDirs(
Mockito.<FSImageStorageInspector>anyObject());
return mockStorage;
}
}
| 16,707
| 38.592417
| 90
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* Test the EditLogFileOutputStream
*/
public class TestEditLogFileOutputStream {
private final static File TEST_DIR = PathUtils
.getTestDir(TestEditLogFileOutputStream.class);
private static final File TEST_EDITS = new File(TEST_DIR,
"testEditLogFileOutput.log");
final static int MIN_PREALLOCATION_LENGTH = EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
private Configuration conf;
@BeforeClass
public static void disableFsync() {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
@Before
@After
public void deleteEditsFile() {
if (TEST_EDITS.exists())
TEST_EDITS.delete();
}
@Before
public void setUp() {
conf = new Configuration();
}
static void flushAndCheckLength(EditLogFileOutputStream elos,
long expectedLength) throws IOException {
elos.setReadyToFlush();
elos.flushAndSync(true);
assertEquals(expectedLength, elos.getFile().length());
}
/**
* Tests writing to the EditLogFileOutputStream. Due to preallocation, the
* length of the edit log will usually be longer than its valid contents.
*/
@Test
public void testRawWrites() throws IOException {
EditLogFileOutputStream elos = new EditLogFileOutputStream(conf,
TEST_EDITS, 0);
try {
byte[] small = new byte[] { 1, 2, 3, 4, 5, 8, 7 };
elos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// The first (small) write we make extends the file by 1 MB due to
// preallocation.
elos.writeRaw(small, 0, small.length);
flushAndCheckLength(elos, MIN_PREALLOCATION_LENGTH);
// The next small write we make goes into the area that was already
// preallocated.
elos.writeRaw(small, 0, small.length);
flushAndCheckLength(elos, MIN_PREALLOCATION_LENGTH);
// Now we write enough bytes so that we exceed the minimum preallocated
// length.
final int BIG_WRITE_LENGTH = 3 * MIN_PREALLOCATION_LENGTH;
byte[] buf = new byte[4096];
for (int i = 0; i < buf.length; i++) {
buf[i] = 0;
}
int total = BIG_WRITE_LENGTH;
while (total > 0) {
int toWrite = (total > buf.length) ? buf.length : total;
elos.writeRaw(buf, 0, toWrite);
total -= toWrite;
}
flushAndCheckLength(elos, 4 * MIN_PREALLOCATION_LENGTH);
} finally {
if (elos != null)
elos.close();
}
}
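  // Length arithmetic for the test above, assuming MIN_PREALLOCATION_LENGTH
  // is 1 MB: the two small flushes leave the file at 1 MB; the big write then
  // adds 3 MB of data on top of the few bytes already present, so the final
  // flush leaves the file preallocated out to 4 * MIN_PREALLOCATION_LENGTH.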
/**
* Tests EditLogFileOutputStream doesn't throw NullPointerException on
* close/abort sequence. See HDFS-2011.
*/
@Test
public void testEditLogFileOutputStreamCloseAbort() throws IOException {
// abort after a close should just ignore
EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf,
TEST_EDITS, 0);
editLogStream.close();
editLogStream.abort();
}
/**
* Tests EditLogFileOutputStream doesn't throw NullPointerException on
* close/close sequence. See HDFS-2011.
*/
@Test
public void testEditLogFileOutputStreamCloseClose() throws IOException {
// close after a close should result in an IOE
EditLogFileOutputStream editLogStream = new EditLogFileOutputStream(conf,
TEST_EDITS, 0);
editLogStream.close();
    try {
      editLogStream.close();
      fail("Expected the second close() to throw an IOException");
    } catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
assertTrue(msg, msg.contains("Trying to use aborted output stream"));
}
}
/**
* Tests EditLogFileOutputStream doesn't throw NullPointerException on being
* abort/abort sequence. See HDFS-2011.
*/
@Test
public void testEditLogFileOutputStreamAbortAbort() throws IOException {
    // abort after an abort should just ignore
EditLogFileOutputStream editLogStream = null;
try {
editLogStream = new EditLogFileOutputStream(conf, TEST_EDITS, 0);
editLogStream.abort();
editLogStream.abort();
} finally {
IOUtils.cleanup(null, editLogStream);
}
}
}
| 5,319
| 31.839506
| 95
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;
import java.util.Enumeration;
import java.util.List;
import java.util.Vector;
import javax.servlet.ServletContext;
import javax.servlet.ServletOutputStream;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.net.NetUtils;
import org.junit.Test;
import org.mockito.Mockito;
import org.mortbay.jetty.InclusiveByteRange;
/**
 * Mock input stream class that always outputs the current position of the
 * stream.
 */
class MockFSInputStream extends FSInputStream {
long currentPos = 0;
@Override
public int read() throws IOException {
return (int)(currentPos++);
}
@Override
public void close() throws IOException {
}
@Override
public void seek(long pos) throws IOException {
currentPos = pos;
}
@Override
public long getPos() throws IOException {
return currentPos;
}
@Override
public boolean seekToNewSource(long targetPos) throws IOException {
return false;
}
}
public class TestStreamFile {
private final HdfsConfiguration CONF = new HdfsConfiguration();
private final DFSClient clientMock = Mockito.mock(DFSClient.class);
private final HttpServletRequest mockHttpServletRequest =
Mockito.mock(HttpServletRequest.class);
private final HttpServletResponse mockHttpServletResponse =
Mockito.mock(HttpServletResponse.class);
private final ServletContext mockServletContext =
Mockito.mock(ServletContext.class);
final StreamFile sfile = new StreamFile() {
private static final long serialVersionUID = -5513776238875189473L;
@Override
public ServletContext getServletContext() {
return mockServletContext;
}
@Override
protected DFSClient getDFSClient(HttpServletRequest request)
throws IOException, InterruptedException {
return clientMock;
}
};
  // Return an array matching the output of MockFSInputStream.
private static byte[] getOutputArray(int start, int count) {
byte[] a = new byte[count];
for (int i = 0; i < count; i++) {
a[i] = (byte)(start+i);
}
return a;
}
@Test
public void testWriteTo() throws IOException {
FSDataInputStream fsdin = new FSDataInputStream(new MockFSInputStream());
ByteArrayOutputStream os = new ByteArrayOutputStream();
// new int[]{s_1, c_1, s_2, c_2, ..., s_n, c_n} means to test
// reading c_i bytes starting at s_i
int[] pairs = new int[]{ 0, 10000,
50, 100,
50, 6000,
1000, 2000,
0, 1,
0, 0,
5000, 0,
};
assertTrue("Pairs array must be even", pairs.length % 2 == 0);
for (int i = 0; i < pairs.length; i+=2) {
StreamFile.copyFromOffset(fsdin, os, pairs[i], pairs[i+1]);
assertArrayEquals("Reading " + pairs[i+1]
+ " bytes from offset " + pairs[i],
getOutputArray(pairs[i], pairs[i+1]),
os.toByteArray());
os.reset();
}
}
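  // MockFSInputStream's read() returns its own position (truncated to a
  // byte), so copying count bytes from offset start must reproduce exactly
  // getOutputArray(start, count); a seek or length bug in copyFromOffset
  // would surface as a mismatched byte value or array length.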
@SuppressWarnings("unchecked")
private List<InclusiveByteRange> strToRanges(String s, int contentLength) {
List<String> l = Arrays.asList(new String[]{"bytes="+s});
Enumeration<?> e = (new Vector<String>(l)).elements();
return InclusiveByteRange.satisfiableRanges(e, contentLength);
}
@Test
public void testSendPartialData() throws IOException {
FSDataInputStream in = new FSDataInputStream(new MockFSInputStream());
ByteArrayOutputStream os = new ByteArrayOutputStream();
// test if multiple ranges, then 416
{
List<InclusiveByteRange> ranges = strToRanges("0-,10-300", 500);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
StreamFile.sendPartialData(in, os, response, 500, ranges);
// Multiple ranges should result in a 416 error
Mockito.verify(response).setStatus(416);
}
// test if no ranges, then 416
{
os.reset();
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
StreamFile.sendPartialData(in, os, response, 500, null);
// No ranges should result in a 416 error
Mockito.verify(response).setStatus(416);
}
// test if invalid single range (out of bounds), then 416
{
List<InclusiveByteRange> ranges = strToRanges("600-800", 500);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
StreamFile.sendPartialData(in, os, response, 500, ranges);
// Single (but invalid) range should result in a 416
Mockito.verify(response).setStatus(416);
}
// test if one (valid) range, then 206
{
List<InclusiveByteRange> ranges = strToRanges("100-300", 500);
HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
StreamFile.sendPartialData(in, os, response, 500, ranges);
// Single (valid) range should result in a 206
Mockito.verify(response).setStatus(206);
assertArrayEquals("Byte range from 100-300",
getOutputArray(100, 201),
os.toByteArray());
}
}
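  // Range semantics exercised above: anything other than exactly one
  // satisfiable range (multiple ranges, no ranges, or an out-of-bounds
  // range) yields 416 Requested Range Not Satisfiable, while the single
  // valid inclusive range 100-300 yields 206 Partial Content and 201 bytes.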
// Test for positive scenario
@Test
public void testDoGetShouldWriteTheFileContentIntoServletOutputStream()
throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
.build();
try {
Path testFile = createFile();
setUpForDoGetTest(cluster, testFile);
ServletOutputStreamExtn outStream = new ServletOutputStreamExtn();
Mockito.doReturn(outStream).when(mockHttpServletResponse)
.getOutputStream();
StreamFile sfile = new StreamFile() {
private static final long serialVersionUID = 7715590481809562722L;
@Override
public ServletContext getServletContext() {
return mockServletContext;
}
};
sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
assertEquals("Not writing the file data into ServletOutputStream",
outStream.getResult(), "test");
} finally {
cluster.shutdown();
}
}
// Test for cleaning the streams in exception cases also
@Test
public void testDoGetShouldCloseTheDFSInputStreamIfResponseGetOutPutStreamThrowsAnyException()
throws Exception {
MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
.build();
try {
Path testFile = createFile();
setUpForDoGetTest(cluster, testFile);
Mockito.doThrow(new IOException()).when(mockHttpServletResponse)
.getOutputStream();
DFSInputStream fsMock = Mockito.mock(DFSInputStream.class);
Mockito.doReturn(fsMock).when(clientMock).open(testFile.toString());
Mockito.doReturn(Long.valueOf(4)).when(fsMock).getFileLength();
try {
sfile.doGet(mockHttpServletRequest, mockHttpServletResponse);
fail("Not throwing the IOException");
} catch (IOException e) {
Mockito.verify(clientMock, Mockito.atLeastOnce()).close();
}
} finally {
cluster.shutdown();
}
}
private void setUpForDoGetTest(MiniDFSCluster cluster, Path testFile) {
Mockito.doReturn(CONF).when(mockServletContext).getAttribute(
JspHelper.CURRENT_CONF);
Mockito.doReturn(NetUtils.getHostPortString(NameNode.getAddress(CONF)))
.when(mockHttpServletRequest).getParameter("nnaddr");
Mockito.doReturn(testFile.toString()).when(mockHttpServletRequest)
.getPathInfo();
Mockito.doReturn("/streamFile"+testFile.toString()).when(mockHttpServletRequest)
.getRequestURI();
}
static Path writeFile(FileSystem fs, Path f) throws IOException {
DataOutputStream out = fs.create(f);
try {
out.writeBytes("test");
} finally {
out.close();
}
assertTrue(fs.exists(f));
return f;
}
private Path createFile() throws IOException {
FileSystem fs = FileSystem.get(CONF);
Path testFile = new Path("/test/mkdirs/doGet");
writeFile(fs, testFile);
return testFile;
}
public static class ServletOutputStreamExtn extends ServletOutputStream {
private final StringBuffer buffer = new StringBuffer(3);
public String getResult() {
return buffer.toString();
}
@Override
public void write(int b) throws IOException {
buffer.append((char) b);
}
}
}
| 10,095
| 31.152866
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclTransformation.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTransformation.*;
import static org.junit.Assert.*;
import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.junit.Test;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.AclException;
/**
* Tests operations that modify ACLs. All tests in this suite have been
* cross-validated against Linux setfacl/getfacl to check for consistency of the
* HDFS implementation.
*/
public class TestAclTransformation {
private static final List<AclEntry> ACL_SPEC_TOO_LARGE;
private static final List<AclEntry> ACL_SPEC_DEFAULT_TOO_LARGE;
static {
ACL_SPEC_TOO_LARGE = Lists.newArrayListWithCapacity(33);
ACL_SPEC_DEFAULT_TOO_LARGE = Lists.newArrayListWithCapacity(33);
for (int i = 0; i < 33; ++i) {
ACL_SPEC_TOO_LARGE.add(aclEntry(ACCESS, USER, "user" + i, ALL));
ACL_SPEC_DEFAULT_TOO_LARGE.add(aclEntry(DEFAULT, USER, "user" + i, ALL));
}
}
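  // 33 entries per scope is one past the per-scope limit that
  // AclTransformation enforces (32 entries, by our reading), so both specs
  // are expected to trigger AclException in the "too large" tests below.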
@Test
public void testFilterAclEntriesByAclSpec() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "execs", READ_WRITE))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana"),
aclEntry(ACCESS, GROUP, "sales"));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, GROUP, "execs", READ_WRITE))
.add(aclEntry(ACCESS, MASK, READ_WRITE))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
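  // Sketch of the mask recalculation seen above (assumption: within a scope,
  // the recalculated mask is the union of the unnamed group entry and every
  // surviving named entry, so with "diana" and "sales" removed the union of
  // READ_WRITE (bruce), READ (group), and READ_WRITE (execs) is READ_WRITE).
  // The helper name is illustrative and unused by the tests.
  private static FsAction sketchRecalculatedMask(List<AclEntry> scopeEntries) {
    FsAction union = FsAction.NONE;
    for (AclEntry e : scopeEntries) {
      boolean named = e.getName() != null;
      boolean unnamedGroup = e.getType() == GROUP && e.getName() == null;
      if (named || unnamedGroup) {
        union = union.or(e.getPermission());
      }
    }
    return union;
  }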
@Test
public void testFilterAclEntriesByAclSpecUnchanged() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "clark"),
aclEntry(ACCESS, GROUP, "execs"));
assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecAccessMaskCalculated()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_WRITE))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana"));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecDefaultMaskCalculated()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "diana"));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecDefaultMaskPreserved()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_WRITE))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana"));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecAccessMaskPreserved()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "diana"));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecAutomaticDefaultUser()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
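  // "Automatic" above: an unnamed default entry cannot simply be dropped.
  // When the spec names DEFAULT USER with no permission, the entry is
  // re-derived by copying the access-scope owner entry (ALL here) rather
  // than removed; the group and other variants below behave the same way.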
@Test
public void testFilterAclEntriesByAclSpecAutomaticDefaultGroup()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, GROUP));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecAutomaticDefaultOther()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, OTHER));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test
public void testFilterAclEntriesByAclSpecEmptyAclSpec() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList();
assertEquals(existing, filterAclEntriesByAclSpec(existing, aclSpec));
}
@Test(expected=AclException.class)
public void testFilterAclEntriesByAclSpecRemoveAccessMaskRequired()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, MASK));
filterAclEntriesByAclSpec(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testFilterAclEntriesByAclSpecRemoveDefaultMaskRequired()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, MASK));
filterAclEntriesByAclSpec(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testFilterAclEntriesByAclSpecInputTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
filterAclEntriesByAclSpec(existing, ACL_SPEC_TOO_LARGE);
}
@Test(expected = AclException.class)
public void testFilterDefaultAclEntriesByAclSpecInputTooLarge()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
filterAclEntriesByAclSpec(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
}
@Test
public void testFilterDefaultAclEntries() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, GROUP, "sales", READ_EXECUTE))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, READ_EXECUTE))
.build();
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", READ_EXECUTE))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
assertEquals(expected, filterDefaultAclEntries(existing));
}
@Test
public void testFilterDefaultAclEntriesUnchanged() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
assertEquals(existing, filterDefaultAclEntries(existing));
}
@Test
public void testMergeAclEntries() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
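  // Note that the merge above synthesizes an ACCESS mask entry that did not
  // exist before: once a named entry is present a mask becomes mandatory,
  // and it is seeded with the union of the group and named permissions
  // (union of READ_EXECUTE and ALL is ALL here).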
@Test
public void testMergeAclEntriesUnchanged() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", ALL))
.add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, "sales", ALL))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, GROUP, "sales", ALL),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE));
assertEquals(existing, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesMultipleNewBeforeExisting()
throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "diana", READ))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, MASK, READ_EXECUTE))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, USER, "clark", READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE))
.add(aclEntry(ACCESS, USER, "clark", READ_EXECUTE))
.add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, MASK, READ_EXECUTE))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesAccessMaskCalculated() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE))
.add(aclEntry(ACCESS, USER, "diana", READ))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_EXECUTE))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesDefaultMaskCalculated() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_WRITE),
aclEntry(DEFAULT, USER, "diana", READ_EXECUTE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
.add(aclEntry(DEFAULT, USER, "diana", READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesDefaultMaskPreserved() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", FsAction.READ_EXECUTE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "diana", READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_EXECUTE))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesAccessMaskPreserved() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "diana", READ_EXECUTE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ_EXECUTE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesAutomaticDefaultUser() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesAutomaticDefaultGroup() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesAutomaticDefaultOther() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, READ_EXECUTE),
aclEntry(DEFAULT, GROUP, READ_EXECUTE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesProvidedAccessMask() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, MASK, ALL));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesProvidedDefaultMask() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, mergeAclEntries(existing, aclSpec));
}
@Test
public void testMergeAclEntriesEmptyAclSpec() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList();
assertEquals(existing, mergeAclEntries(existing, aclSpec));
}
@Test(expected=AclException.class)
public void testMergeAclEntriesInputTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
mergeAclEntries(existing, ACL_SPEC_TOO_LARGE);
}
@Test(expected=AclException.class)
public void testMergeAclDefaultEntriesInputTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
mergeAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
}
@Test(expected=AclException.class)
public void testMergeAclEntriesResultTooLarge() throws AclException {
ImmutableList.Builder<AclEntry> aclBuilder =
new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL));
for (int i = 1; i <= 28; ++i) {
aclBuilder.add(aclEntry(ACCESS, USER, "user" + i, READ));
}
aclBuilder
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, NONE));
List<AclEntry> existing = aclBuilder.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
mergeAclEntries(existing, aclSpec);
}
@Test(expected = AclException.class)
public void testMergeAclDefaultEntriesResultTooLarge() throws AclException {
ImmutableList.Builder<AclEntry> aclBuilder =
new ImmutableList.Builder<AclEntry>()
.add(aclEntry(DEFAULT, USER, ALL));
for (int i = 1; i <= 28; ++i) {
aclBuilder.add(aclEntry(DEFAULT, USER, "user" + i, READ));
}
aclBuilder
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE));
List<AclEntry> existing = aclBuilder.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ));
mergeAclEntries(existing, aclSpec);
}
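  // Both "result too large" cases start from exactly 32 entries in one scope
  // (1 unnamed user + 28 named users + group + mask + other), so merging a
  // single new named entry pushes the scope to 33 and past the limit.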
@Test(expected=AclException.class)
public void testMergeAclEntriesDuplicateEntries() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, USER, "diana", READ_WRITE),
aclEntry(ACCESS, USER, "clark", READ),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE));
mergeAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testMergeAclEntriesNamedMask() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE));
mergeAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testMergeAclEntriesNamedOther() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE));
mergeAclEntries(existing, aclSpec);
}
@Test
public void testReplaceAclEntries() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_WRITE),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, GROUP, "sales", ALL),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, "sales", ALL))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesUnchanged() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", ALL))
.add(aclEntry(ACCESS, GROUP, READ_EXECUTE))
.add(aclEntry(ACCESS, GROUP, "sales", ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", ALL))
.add(aclEntry(DEFAULT, GROUP, READ_EXECUTE))
.add(aclEntry(DEFAULT, GROUP, "sales", ALL))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, GROUP, "sales", ALL),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE));
assertEquals(existing, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesAccessMaskCalculated() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ),
aclEntry(ACCESS, USER, "diana", READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_WRITE))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesDefaultMaskCalculated() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ),
aclEntry(DEFAULT, USER, "diana", READ_WRITE),
aclEntry(DEFAULT, GROUP, ALL),
aclEntry(DEFAULT, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, ALL))
.add(aclEntry(DEFAULT, MASK, ALL))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesDefaultMaskPreserved() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ_WRITE))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ),
aclEntry(ACCESS, USER, "diana", READ_WRITE),
aclEntry(ACCESS, GROUP, ALL),
aclEntry(ACCESS, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, ALL))
.add(aclEntry(ACCESS, MASK, ALL))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "diana", ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesAccessMaskPreserved() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, USER, "diana", READ_WRITE))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ),
aclEntry(DEFAULT, GROUP, READ),
aclEntry(DEFAULT, OTHER, NONE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, USER, "bruce", READ))
.add(aclEntry(ACCESS, USER, "diana", READ_WRITE))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, MASK, READ))
.add(aclEntry(ACCESS, OTHER, READ))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesAutomaticDefaultUser() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "bruce", READ),
aclEntry(DEFAULT, GROUP, READ_WRITE),
aclEntry(DEFAULT, MASK, READ_WRITE),
aclEntry(DEFAULT, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ_WRITE))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesAutomaticDefaultGroup() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "bruce", READ),
aclEntry(DEFAULT, MASK, READ),
aclEntry(DEFAULT, OTHER, READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, READ))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesAutomaticDefaultOther() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, READ_WRITE),
aclEntry(DEFAULT, USER, "bruce", READ),
aclEntry(DEFAULT, GROUP, READ_WRITE),
aclEntry(DEFAULT, MASK, READ_WRITE));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, READ_WRITE))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ_WRITE))
.add(aclEntry(DEFAULT, MASK, READ_WRITE))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test
public void testReplaceAclEntriesOnlyDefaults() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ));
List<AclEntry> expected = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, USER, "bruce", READ))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, MASK, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
assertEquals(expected, replaceAclEntries(existing, aclSpec));
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesInputTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
replaceAclEntries(existing, ACL_SPEC_TOO_LARGE);
}
@Test(expected=AclException.class)
public void testReplaceAclDefaultEntriesInputTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(DEFAULT, USER, ALL))
.add(aclEntry(DEFAULT, GROUP, READ))
.add(aclEntry(DEFAULT, OTHER, NONE))
.build();
replaceAclEntries(existing, ACL_SPEC_DEFAULT_TOO_LARGE);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesResultTooLarge() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayListWithCapacity(32);
aclSpec.add(aclEntry(ACCESS, USER, ALL));
for (int i = 1; i <= 29; ++i) {
aclSpec.add(aclEntry(ACCESS, USER, "user" + i, READ));
}
aclSpec.add(aclEntry(ACCESS, GROUP, READ));
aclSpec.add(aclEntry(ACCESS, OTHER, NONE));
// The ACL spec now has 32 entries. Automatic mask calculation will push it
// over the limit to 33.
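// (1 owner entry + 29 named-user entries + 1 group + 1 other = 32; the
// automatically computed mask entry becomes the 33rd.)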
replaceAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesDuplicateEntries() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, USER, "diana", READ_WRITE),
aclEntry(ACCESS, USER, "clark", READ),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
replaceAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesNamedMask() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(ACCESS, MASK, "bruce", READ_EXECUTE));
replaceAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesNamedOther() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(ACCESS, OTHER, "bruce", READ_EXECUTE));
replaceAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesMissingUser() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE));
replaceAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesMissingGroup() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE));
replaceAclEntries(existing, aclSpec);
}
@Test(expected=AclException.class)
public void testReplaceAclEntriesMissingOther() throws AclException {
List<AclEntry> existing = new ImmutableList.Builder<AclEntry>()
.add(aclEntry(ACCESS, USER, ALL))
.add(aclEntry(ACCESS, GROUP, READ))
.add(aclEntry(ACCESS, OTHER, NONE))
.build();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, GROUP, "sales", ALL),
aclEntry(ACCESS, MASK, ALL));
replaceAclEntries(existing, aclSpec);
}
}
| 51,232
| 39.725755
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.atomic.AtomicReference;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
/**
* This class tests various synchronization bugs in FSEditLog rolling
* and namespace saving.
*/
public class TestEditLogRace {
static {
((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
}
private static final Log LOG = LogFactory.getLog(TestEditLogRace.class);
private static final String NAME_DIR =
MiniDFSCluster.getBaseDirectory() + "name1";
// This test creates NUM_THREADS threads and each thread continuously writes
// transactions
static final int NUM_THREADS = 16;
/**
* The number of times to roll the edit log during the test. Since this
* tests for a race condition, higher numbers are more likely to find
* a bug if it exists, but the test will take longer.
*/
static final int NUM_ROLLS = 30;
/**
* The number of times to save the fsimage and create an empty edit log.
*/
static final int NUM_SAVE_IMAGE = 30;
private final List<Transactions> workers = new ArrayList<Transactions>();
private static final int NUM_DATA_NODES = 1;
/**
* Several of the test cases work by introducing a sleep
* into an operation that is usually fast, and then verifying
* that another operation blocks for at least this amount of time.
* This value needs to be significantly longer than the average
* time for an fsync() or enterSafeMode().
*/
private static final int BLOCK_TIME = 10;
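// A minimal sketch of that verification idiom, assuming a Runnable "op"
// that is expected to stall behind the injected sleep. The helper name
// (assertBlockedAtLeast) is illustrative and is not used by the tests below.
private static void assertBlockedAtLeast(Runnable op, int minSeconds) {
long start = Time.now();
op.run();
long elapsedMs = Time.now() - start;
assertTrue("expected the operation to block for at least " + minSeconds
+ "s, but it returned after " + elapsedMs + "ms",
elapsedMs > (minSeconds - 1) * 1000L);
}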
//
// an object that does a bunch of transactions
//
static class Transactions implements Runnable {
final NamenodeProtocols nn;
short replication = 3;
long blockSize = 64;
volatile boolean stopped = false;
volatile Thread thr;
final AtomicReference<Throwable> caught;
Transactions(NamenodeProtocols ns, AtomicReference<Throwable> caught) {
nn = ns;
this.caught = caught;
}
// add a bunch of transactions.
@Override
public void run() {
thr = Thread.currentThread();
FsPermission p = new FsPermission((short)0777);
int i = 0;
while (!stopped) {
try {
String dirname = "/thr-" + thr.getId() + "-dir-" + i;
nn.mkdirs(dirname, p, true);
nn.delete(dirname, true);
} catch (SafeModeException sme) {
// This is OK - the tests will bring NN in and out of safemode
} catch (Throwable e) {
LOG.warn("Got error in transaction thread", e);
caught.compareAndSet(null, e);
break;
}
i++;
}
}
public void stop() {
stopped = true;
}
public Thread getThread() {
return thr;
}
}
private void startTransactionWorkers(NamenodeProtocols namesystem,
AtomicReference<Throwable> caughtErr) {
// Create threads and make them run transactions concurrently.
for (int i = 0; i < NUM_THREADS; i++) {
Transactions trans = new Transactions(namesystem, caughtErr);
new Thread(trans, "TransactionThread-" + i).start();
workers.add(trans);
}
}
private void stopTransactionWorkers() {
// wait for all transactions to get over
for (Transactions worker : workers) {
worker.stop();
}
for (Transactions worker : workers) {
Thread thr = worker.getThread();
try {
if (thr != null) thr.join();
} catch (InterruptedException ignored) {}
}
}
/**
* Tests rolling edit logs while transactions are ongoing.
*/
@Test
public void testEditLogRolling() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final NamenodeProtocols nn = cluster.getNameNode().getRpcServer();
FSImage fsimage = cluster.getNamesystem().getFSImage();
StorageDirectory sd = fsimage.getStorage().getStorageDir(0);
startTransactionWorkers(nn, caughtErr);
long previousLogTxId = 1;
for (int i = 0; i < NUM_ROLLS && caughtErr.get() == null; i++) {
try {
Thread.sleep(20);
} catch (InterruptedException e) {}
LOG.info("Starting roll " + i + ".");
CheckpointSignature sig = nn.rollEditLog();
long nextLog = sig.curSegmentTxId;
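// curSegmentTxId is the first transaction id of the newly opened segment,
// so the segment just finalized covers [previousLogTxId, nextLog - 1].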
String logFileName = NNStorage.getFinalizedEditsFileName(
previousLogTxId, nextLog - 1);
previousLogTxId += verifyEditLogs(cluster.getNamesystem(), fsimage,
logFileName, previousLogTxId);
assertEquals(previousLogTxId, nextLog);
File expectedLog = NNStorage.getInProgressEditsFile(sd, previousLogTxId);
assertTrue("Expect " + expectedLog + " to exist", expectedLog.exists());
}
} finally {
stopTransactionWorkers();
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
private long verifyEditLogs(FSNamesystem namesystem, FSImage fsimage,
String logFileName, long startTxId)
throws IOException {
long numEdits = -1;
// Verify that we can read in all the transactions that we have written.
// If there were any corruptions, it is likely that the reading in
// of these transactions will throw an exception.
for (StorageDirectory sd :
fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
File editFile = new File(sd.getCurrentDir(), logFileName);
System.out.println("Verifying file: " + editFile);
FSEditLogLoader loader = new FSEditLogLoader(namesystem, startTxId);
long numEditsThisLog = loader.loadFSEdits(
new EditLogFileInputStream(editFile), startTxId);
System.out.println("Number of edits: " + numEditsThisLog);
assertTrue(numEdits == -1 || numEditsThisLog == numEdits);
numEdits = numEditsThisLog;
}
assertTrue(numEdits != -1);
return numEdits;
}
/**
* Tests saving fs image while transactions are ongoing.
*/
@Test
public void testSaveNamespace() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
AtomicReference<Throwable> caughtErr = new AtomicReference<Throwable>();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
final NamenodeProtocols nn = cluster.getNameNodeRpc();
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
startTransactionWorkers(nn, caughtErr);
for (int i = 0; i < NUM_SAVE_IMAGE && caughtErr.get() == null; i++) {
try {
Thread.sleep(20);
} catch (InterruptedException ignored) {}
LOG.info("Save " + i + ": entering safe mode");
namesystem.enterSafeMode(false);
// Verify edit logs before the save
// They should start with the first edit after the checkpoint
long logStartTxId = fsimage.getStorage().getMostRecentCheckpointTxId() + 1;
verifyEditLogs(namesystem, fsimage,
NNStorage.getInProgressEditsFileName(logStartTxId),
logStartTxId);
LOG.info("Save " + i + ": saving namespace");
namesystem.saveNamespace();
LOG.info("Save " + i + ": leaving safemode");
long savedImageTxId = fsimage.getStorage().getMostRecentCheckpointTxId();
// Verify that edit logs post save got finalized and aren't corrupt
verifyEditLogs(namesystem, fsimage,
NNStorage.getFinalizedEditsFileName(logStartTxId, savedImageTxId),
logStartTxId);
// The checkpoint id should be 1 less than the last written ID, since
// the log roll writes the "BEGIN" transaction to the new log.
assertEquals(fsimage.getStorage().getMostRecentCheckpointTxId(),
editLog.getLastWrittenTxId() - 1);
namesystem.leaveSafeMode();
LOG.info("Save " + i + ": complete");
}
} finally {
stopTransactionWorkers();
if (caughtErr.get() != null) {
throw new RuntimeException(caughtErr.get());
}
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
private Configuration getConf() {
Configuration conf = new HdfsConfiguration();
FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, NAME_DIR);
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR);
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
return conf;
}
/**
* The logSync() method in FSEditLog is unsynchronized while syncing
* so that other threads can concurrently enqueue edits while the prior
* sync is ongoing. This test checks that the log is saved correctly
* if the saveImage occurs while the syncing thread is in the unsynchronized middle section.
*
* This replicates the following manual test proposed by Konstantin:
* I start the name-node in debugger.
* I do -mkdir and stop the debugger in logSync() just before it does flush.
* Then I enter safe mode with another client
* I start saveNamespace and stop the debugger in
* FSImage.saveFSImage() -> FSEditLog.createEditLogFile()
* -> EditLogFileOutputStream.create() ->
* after truncating the file but before writing LAYOUT_VERSION into it.
* Then I let logSync() run.
* Then I terminate the name-node.
* After that the name-node won't start, since the edits file is broken.
*/
@Test
public void testSaveImageWhileSyncInProgress() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
JournalAndStream jas = editLog.getJournals().get(0);
EditLogFileOutputStream spyElos =
spy((EditLogFileOutputStream)jas.getCurrentStream());
jas.setCurrentStreamForTests(spyElos);
final AtomicReference<Throwable> deferredException =
new AtomicReference<Throwable>();
final CountDownLatch waitToEnterFlush = new CountDownLatch(1);
final Thread doAnEditThread = new Thread() {
@Override
public void run() {
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",
new PermissionStatus("test","test", new FsPermission((short)00755)),
true);
LOG.info("mkdirs complete");
} catch (Throwable ioe) {
LOG.fatal("Got exception", ioe);
deferredException.set(ioe);
waitToEnterFlush.countDown();
}
}
};
Answer<Void> blockingFlush = new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
LOG.info("Flush called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it to flush section...");
// Signal to main thread that the edit thread is in the racy section
waitToEnterFlush.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME*1000);
LOG.info("Going through to flush. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("Flush complete");
return null;
}
};
doAnswer(blockingFlush).when(spyElos).flush();
doAnEditThread.start();
// Wait for the edit thread to get to the logsync unsynchronized section
LOG.info("Main thread: waiting to enter flush...");
waitToEnterFlush.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync is in unsynchronized section.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since flush will sleep that long");
long st = Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et = Time.now();
LOG.info("Entered safe mode");
// Make sure we really waited for the flush to complete!
assertTrue(et - st > (BLOCK_TIME - 1)*1000);
// Once we're in safe mode, save namespace.
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// We did 3 edits: begin, txn, and end
assertEquals(3, verifyEditLogs(namesystem, fsimage,
NNStorage.getFinalizedEditsFileName(1, 3),
1));
// after the save, just the one "begin"
assertEquals(1, verifyEditLogs(namesystem, fsimage,
NNStorage.getInProgressEditsFileName(4),
4));
} finally {
LOG.info("Closing nn");
if(namesystem != null) namesystem.close();
}
}
/**
* Most of the FSNamesystem methods have a synchronized section where they
* update the name system itself and write to the edit log, and then
* unsynchronized, they call logSync. This test verifies that, if an
* operation has written to the edit log but not yet synced it,
* we wait for that sync before entering safe mode.
*/
@Test
public void testSaveRightBeforeSync() throws Exception {
Configuration conf = getConf();
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
final FSNamesystem namesystem = FSNamesystem.loadFromDisk(conf);
try {
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = spy(fsimage.getEditLog());
DFSTestUtil.setEditLogForTesting(namesystem, editLog);
final AtomicReference<Throwable> deferredException =
new AtomicReference<Throwable>();
final CountDownLatch waitToEnterSync = new CountDownLatch(1);
final Thread doAnEditThread = new Thread() {
@Override
public void run() {
try {
LOG.info("Starting mkdirs");
namesystem.mkdirs("/test",
new PermissionStatus("test","test", new FsPermission((short)00755)),
true);
LOG.info("mkdirs complete");
} catch (Throwable ioe) {
LOG.fatal("Got exception", ioe);
deferredException.set(ioe);
waitToEnterSync.countDown();
}
}
};
Answer<Void> blockingSync = new Answer<Void>() {
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
LOG.info("logSync called");
if (Thread.currentThread() == doAnEditThread) {
LOG.info("edit thread: Telling main thread we made it just before logSync...");
waitToEnterSync.countDown();
LOG.info("edit thread: sleeping for " + BLOCK_TIME + "secs");
Thread.sleep(BLOCK_TIME*1000);
LOG.info("Going through to logSync. This will allow the main thread to continue.");
}
invocation.callRealMethod();
LOG.info("logSync complete");
return null;
}
};
doAnswer(blockingSync).when(editLog).logSync();
doAnEditThread.start();
LOG.info("Main thread: waiting to just before logSync...");
waitToEnterSync.await();
assertNull(deferredException.get());
LOG.info("Main thread: detected that logSync about to be called.");
LOG.info("Trying to enter safe mode.");
LOG.info("This should block for " + BLOCK_TIME + "sec, since we have pending edits");
long st = Time.now();
namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
long et = Time.now();
LOG.info("Entered safe mode");
// Make sure we really waited for the flush to complete!
assertTrue(et - st > (BLOCK_TIME - 1)*1000);
// Once we're in safe mode, save namespace.
namesystem.saveNamespace();
LOG.info("Joining on edit thread...");
doAnEditThread.join();
assertNull(deferredException.get());
// We did 3 edits: begin, txn, and end
assertEquals(3, verifyEditLogs(namesystem, fsimage,
NNStorage.getFinalizedEditsFileName(1, 3),
1));
// after the save, just the one "begin"
assertEquals(1, verifyEditLogs(namesystem, fsimage,
NNStorage.getInProgressEditsFileName(4),
4));
} finally {
LOG.info("Closing nn");
if(namesystem != null) namesystem.close();
}
}
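// A minimal, self-contained sketch (not part of the original tests) of the
// write idiom the comments above describe: mutate and enqueue the edit
// under the lock, then sync outside it, with logSync itself holding the
// lock only long enough to swap buffers. All names here are hypothetical.
static class WritePatternSketch {
private final Object lock = new Object();
private List<String> pendingEdits = new ArrayList<String>();
void writeOp(String edit) {
synchronized (lock) {
// 1. mutate the namespace and enqueue the matching edit record
pendingEdits.add(edit);
}
// 2. force the buffered edit to disk outside the namespace lock
logSync();
}
void logSync() {
List<String> toFlush;
synchronized (lock) {
// Swap buffers under the lock so other threads can keep enqueueing...
toFlush = pendingEdits;
pendingEdits = new ArrayList<String>();
}
// ...then do the slow flush in this unsynchronized middle section,
// which is exactly where the tests above inject their sleeps.
for (String edit : toFlush) {
// stand-in for writing and fsyncing the edit to the log file
}
}
}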
}
| 19,664
| 35.552045
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDecommissioningStatus.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.io.PrintStream;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.Random;
import org.apache.commons.io.output.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DecommissionManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* This class tests the decommissioning of nodes.
*/
public class TestDecommissioningStatus {
private static final long seed = 0xDEADBEEFL;
private static final int blockSize = 8192;
private static final int fileSize = 16384;
private static final int numDatanodes = 2;
private static MiniDFSCluster cluster;
private static FileSystem fileSys;
private static Path excludeFile;
private static FileSystem localFileSys;
private static Configuration conf;
private static Path dir;
final ArrayList<String> decommissionedNodes = new ArrayList<String>(numDatanodes);
@BeforeClass
public static void setUp() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_REPLICATION_CONSIDERLOAD_KEY,
false);
// Set up the hosts/exclude files.
localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
dir = new Path(workingDir, "build/test/data/work-dir/decommission");
assertTrue(localFileSys.mkdirs(dir));
excludeFile = new Path(dir, "exclude");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
Path includeFile = new Path(dir, "include");
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
1000);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
4);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_INTERVAL_KEY, 1);
conf.setLong(DFSConfigKeys.DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY, 1);
writeConfigFile(localFileSys, excludeFile, null);
writeConfigFile(localFileSys, includeFile, null);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
cluster.getNamesystem().getBlockManager().getDatanodeManager()
.setHeartbeatExpireInterval(3000);
Logger.getLogger(DecommissionManager.class).setLevel(Level.DEBUG);
}
@AfterClass
public static void tearDown() throws Exception {
if (localFileSys != null ) cleanupFile(localFileSys, dir);
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
private static void writeConfigFile(FileSystem fs, Path name,
ArrayList<String> nodes) throws IOException {
// delete if it already exists
if (fs.exists(name)) {
fs.delete(name, true);
}
FSDataOutputStream stm = fs.create(name);
if (nodes != null) {
for (Iterator<String> it = nodes.iterator(); it.hasNext();) {
String node = it.next();
stm.writeBytes(node);
stm.writeBytes("\n");
}
}
stm.close();
}
private void writeFile(FileSystem fileSys, Path name, short repl)
throws IOException {
// create and write a file that contains two blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
private FSDataOutputStream writeIncompleteFile(FileSystem fileSys, Path name,
short repl) throws IOException {
// create and write a file that contains two blocks of data
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096), repl,
blockSize);
byte[] buffer = new byte[fileSize];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
// need to make sure that we actually write out both file blocks
// (see FSOutputSummer#flush)
stm.flush();
// Do not close stream, return it
// so that it is not garbage collected
return stm;
}
static private void cleanupFile(FileSystem fileSys, Path name)
throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
/*
* Decommissions the node at the given index
*/
private String decommissionNode(FSNamesystem namesystem, DFSClient client,
FileSystem localFileSys, int nodeIndex) throws IOException {
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
String nodename = info[nodeIndex].getXferAddr();
decommissionNode(namesystem, localFileSys, nodename);
return nodename;
}
/*
* Decommissions the node by name
*/
private void decommissionNode(FSNamesystem namesystem,
FileSystem localFileSys, String dnName) throws IOException {
System.out.println("Decommissioning node: " + dnName);
// write nodename into the exclude file.
ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
nodes.add(dnName);
writeConfigFile(localFileSys, excludeFile, nodes);
}
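// Note: writing the exclude file by itself has no effect; each caller
// follows up with DatanodeManager#refreshNodes(conf) so the namenode
// re-reads the file and starts decommissioning.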
private void checkDecommissionStatus(DatanodeDescriptor decommNode,
int expectedUnderRep, int expectedDecommissionOnly,
int expectedUnderRepInOpenFiles) {
assertEquals("Unexpected num under-replicated blocks",
expectedUnderRep,
decommNode.decommissioningStatus.getUnderReplicatedBlocks());
assertEquals("Unexpected number of decom-only replicas",
expectedDecommissionOnly,
decommNode.decommissioningStatus.getDecommissionOnlyReplicas());
assertEquals(
"Unexpected number of replicas in under-replicated open files",
expectedUnderRepInOpenFiles,
decommNode.decommissioningStatus.getUnderReplicatedInOpenFiles());
}
private void checkDFSAdminDecommissionStatus(
List<DatanodeDescriptor> expectedDecomm, DistributedFileSystem dfs,
DFSAdmin admin) throws IOException {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
PrintStream oldOut = System.out;
System.setOut(ps);
try {
// Parse DFSAdmin just to check the count
admin.report(new String[] {"-decommissioning"}, 0);
String[] lines = baos.toString().split("\n");
Integer num = null;
int count = 0;
for (String line: lines) {
if (line.startsWith("Decommissioning datanodes")) {
// Pull out the "(num)" and parse it into an int
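// e.g. assuming a report line like "Decommissioning datanodes (2):",
// temp is "(2):" and the digits between the parentheses are extracted.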
String temp = line.split(" ")[2];
num = Integer.parseInt(temp.substring(1, temp.length() - 2));
}
if (line.contains("Decommission in progress")) {
count++;
}
}
assertTrue("No decommissioning output", num != null);
assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
num.intValue());
assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
count);
// Check Java API for correct contents
List<DatanodeInfo> decomming =
new ArrayList<DatanodeInfo>(Arrays.asList(dfs
.getDataNodeStats(DatanodeReportType.DECOMMISSIONING)));
assertEquals("Unexpected number of decomming DNs", expectedDecomm.size(),
decomming.size());
for (DatanodeID id : expectedDecomm) {
assertTrue("Did not find expected decomming DN " + id,
decomming.contains(id));
}
} finally {
System.setOut(oldOut);
}
}
/**
* Tests Decommissioning Status in DFS.
*/
@Test
public void testDecommissionStatus() throws Exception {
InetSocketAddress addr = new InetSocketAddress("localhost", cluster
.getNameNodePort());
DFSClient client = new DFSClient(addr, conf);
DatanodeInfo[] info = client.datanodeReport(DatanodeReportType.LIVE);
assertEquals("Number of Datanodes ", 2, info.length);
DistributedFileSystem fileSys = cluster.getFileSystem();
DFSAdmin admin = new DFSAdmin(cluster.getConfiguration(0));
short replicas = numDatanodes;
//
// Decommission one node. Verify the decommission status
//
Path file1 = new Path("decommission.dat");
writeFile(fileSys, file1, replicas);
Path file2 = new Path("decommission1.dat");
FSDataOutputStream st1 = writeIncompleteFile(fileSys, file2, replicas);
for (DataNode d: cluster.getDataNodes()) {
DataNodeTestUtils.triggerBlockReport(d);
}
FSNamesystem fsn = cluster.getNamesystem();
final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
for (int iteration = 0; iteration < numDatanodes; iteration++) {
String downnode = decommissionNode(fsn, client, localFileSys, iteration);
dm.refreshNodes(conf);
decommissionedNodes.add(downnode);
BlockManagerTestUtil.recheckDecommissionState(dm);
final List<DatanodeDescriptor> decommissioningNodes = dm.getDecommissioningNodes();
if (iteration == 0) {
assertEquals(1, decommissioningNodes.size());
DatanodeDescriptor decommNode = decommissioningNodes.get(0);
checkDecommissionStatus(decommNode, 3, 0, 1);
checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 1),
fileSys, admin);
} else {
assertEquals(2, decommissioningNodes.size());
DatanodeDescriptor decommNode1 = decommissioningNodes.get(0);
DatanodeDescriptor decommNode2 = decommissioningNodes.get(1);
// This one is still 3,3,1 since it passed over the UC block
// earlier, before node 2 was decommed
checkDecommissionStatus(decommNode1, 3, 3, 1);
// This one is 4,4,2 since it has the full state
checkDecommissionStatus(decommNode2, 4, 4, 2);
checkDFSAdminDecommissionStatus(decommissioningNodes.subList(0, 2),
fileSys, admin);
}
}
// Call refreshNodes on FSNamesystem with empty exclude file.
// This will remove the datanodes from decommissioning list and
// make them available again.
writeConfigFile(localFileSys, excludeFile, null);
dm.refreshNodes(conf);
st1.close();
cleanupFile(fileSys, file1);
cleanupFile(fileSys, file2);
}
/**
* Verify a DN remains in DECOMMISSION_INPROGRESS state if it is marked
* as dead before decommission has completed. That will allow the DN to resume
* the replication process after it rejoins the cluster.
*/
@Test(timeout=120000)
public void testDecommissionStatusAfterDNRestart() throws Exception {
DistributedFileSystem fileSys =
(DistributedFileSystem)cluster.getFileSystem();
// Create a file with one block. That block has one replica.
Path f = new Path("decommission.dat");
DFSTestUtil.createFile(fileSys, f, fileSize, fileSize, fileSize,
(short)1, seed);
// Find the DN that owns the only replica.
RemoteIterator<LocatedFileStatus> fileList = fileSys.listLocatedStatus(f);
BlockLocation[] blockLocations = fileList.next().getBlockLocations();
String dnName = blockLocations[0].getNames()[0];
// Decommission the DN.
FSNamesystem fsn = cluster.getNamesystem();
final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
decommissionNode(fsn, localFileSys, dnName);
dm.refreshNodes(conf);
// Stop the DN when decommission is in progress.
// Given that DFS_DATANODE_BALANCE_BANDWIDTHPERSEC_KEY is set to 1 and the
// size of the block, the decommission will take far longer than the test
// timeout to complete. So when stopDataNode is called, decommission
// should still be in progress.
DataNodeProperties dataNodeProperties = cluster.stopDataNode(dnName);
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
while (true) {
dm.fetchDatanodes(null, dead, false);
if (dead.size() == 1) {
break;
}
Thread.sleep(1000);
}
// Force removal of the dead node's blocks.
BlockManagerTestUtil.checkHeartbeat(fsn.getBlockManager());
// Force DatanodeManager to check decommission state.
BlockManagerTestUtil.recheckDecommissionState(dm);
// Verify that the DN remains in DECOMMISSION_INPROGRESS state.
assertTrue("the node should be DECOMMISSION_IN_PROGRESSS",
dead.get(0).isDecommissionInProgress());
// Check DatanodeManager#getDecommissionNodes, make sure it returns
// the node as decommissioning, even if it's dead
List<DatanodeDescriptor> decomlist = dm.getDecommissioningNodes();
assertTrue("The node should be be decommissioning", decomlist.size() == 1);
// Delete the under-replicated file, which should let the
// DECOMMISSION_IN_PROGRESS node become DECOMMISSIONED
cleanupFile(fileSys, f);
BlockManagerTestUtil.recheckDecommissionState(dm);
assertTrue("the node should be decommissioned",
dead.get(0).isDecommissioned());
// Add the node back
cluster.restartDataNode(dataNodeProperties, true);
cluster.waitActive();
// Call refreshNodes on FSNamesystem with empty exclude file.
// This will remove the datanodes from decommissioning list and
// make them available again.
writeConfigFile(localFileSys, excludeFile, null);
dm.refreshNodes(conf);
}
/**
* Verify the support for decommissioning a datanode that is already dead.
* Under this scenario the datanode should immediately be marked as
* DECOMMISSIONED
*/
@Test(timeout=120000)
public void testDecommissionDeadDN() throws Exception {
Logger log = Logger.getLogger(DecommissionManager.class);
log.setLevel(Level.DEBUG);
DatanodeID dnID = cluster.getDataNodes().get(0).getDatanodeId();
String dnName = dnID.getXferAddr();
DataNodeProperties stoppedDN = cluster.stopDataNode(0);
DFSTestUtil.waitForDatanodeState(cluster, dnID.getDatanodeUuid(),
false, 30000);
FSNamesystem fsn = cluster.getNamesystem();
final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
DatanodeDescriptor dnDescriptor = dm.getDatanode(dnID);
decommissionNode(fsn, localFileSys, dnName);
dm.refreshNodes(conf);
BlockManagerTestUtil.recheckDecommissionState(dm);
assertTrue(dnDescriptor.isDecommissioned());
// Add the node back
cluster.restartDataNode(stoppedDN, true);
cluster.waitActive();
// Call refreshNodes on FSNamesystem with empty exclude file to remove the
// datanode from decommissioning list and make it available again.
writeConfigFile(localFileSys, excludeFile, null);
dm.refreshNodes(conf);
}
}
| 17,366
| 38.924138
| 89
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.SocketTimeoutException;
import java.net.URL;
import java.util.Collections;
import java.util.List;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.http.HttpServerFunctionalTest;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.collect.ImmutableList;
public class TestTransferFsImage {
private static final File TEST_DIR = PathUtils.getTestDir(TestTransferFsImage.class);
/**
* Regression test for HDFS-1997. Test that, if an exception
* occurs on the client side, it is properly reported as such,
* and reported to the associated NNStorage object.
*/
@Test
public void testClientSideException() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
NNStorage mockStorage = Mockito.mock(NNStorage.class);
List<File> localPath = Collections.singletonList(
new File("/xxxxx-does-not-exist/blah"));
try {
URL fsName = DFSUtil.getInfoServer(
cluster.getNameNode().getServiceRpcAddress(), conf,
DFSUtil.getHttpClientScheme(conf)).toURL();
String id = "getimage=1&txid=0";
TransferFsImage.getFileClient(fsName, id, localPath, mockStorage, false);
fail("Didn't get an exception!");
} catch (IOException ioe) {
Mockito.verify(mockStorage).reportErrorOnFile(localPath.get(0));
assertTrue(
"Unexpected exception: " + StringUtils.stringifyException(ioe),
ioe.getMessage().contains("Unable to download to any storage"));
} finally {
cluster.shutdown();
}
}
/**
* Similar to the above test, except that there are multiple local files
* and one of them can be saved.
*/
@Test
public void testClientSideExceptionOnJustOneDir() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).build();
NNStorage mockStorage = Mockito.mock(NNStorage.class);
List<File> localPaths = ImmutableList.of(
new File("/xxxxx-does-not-exist/blah"),
new File(TEST_DIR, "testfile")
);
try {
URL fsName = DFSUtil.getInfoServer(
cluster.getNameNode().getServiceRpcAddress(), conf,
DFSUtil.getHttpClientScheme(conf)).toURL();
String id = "getimage=1&txid=0";
TransferFsImage.getFileClient(fsName, id, localPaths, mockStorage, false);
Mockito.verify(mockStorage).reportErrorOnFile(localPaths.get(0));
assertTrue("The valid local file should get saved properly",
localPaths.get(1).length() > 0);
} finally {
cluster.shutdown();
}
}
/**
* Test to verify the read timeout
*/
@Test(timeout = 5000)
public void testGetImageTimeout() throws Exception {
HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
try {
testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
TestImageTransferServlet.class);
testServer.start();
URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
TransferFsImage.timeout = 2000;
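// TestImageTransferServlet (below) holds each request for 5 seconds, so a
// 2 second read timeout should always fire first.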
try {
TransferFsImage.getFileClient(serverURL, "txid=1", null,
null, false);
fail("TransferImage Should fail with timeout");
} catch (SocketTimeoutException e) {
assertEquals("Read should timeout", "Read timed out", e.getMessage());
}
} finally {
if (testServer != null) {
testServer.stop();
}
}
}
/**
* Test to verify the timeout of Image upload
*/
@Test(timeout = 10000)
public void testImageUploadTimeout() throws Exception {
Configuration conf = new HdfsConfiguration();
NNStorage mockStorage = Mockito.mock(NNStorage.class);
HttpServer2 testServer = HttpServerFunctionalTest.createServer("hdfs");
try {
testServer.addServlet("ImageTransfer", ImageServlet.PATH_SPEC,
TestImageTransferServlet.class);
testServer.start();
URL serverURL = HttpServerFunctionalTest.getServerURL(testServer);
// Set the timeout here, otherwise the default will be used.
TransferFsImage.timeout = 2000;
File tmpDir = new File(new FileSystemTestHelper().getTestRootDir());
tmpDir.mkdirs();
File mockImageFile = File.createTempFile("image", "", tmpDir);
FileOutputStream imageFile = new FileOutputStream(mockImageFile);
imageFile.write("data".getBytes());
imageFile.close();
Mockito.when(
mockStorage.findImageFile(Mockito.any(NameNodeFile.class),
Mockito.anyLong())).thenReturn(mockImageFile);
Mockito.when(mockStorage.toColonSeparatedString()).thenReturn(
"storage:info:string");
try {
TransferFsImage.uploadImageFromStorage(serverURL, conf, mockStorage,
NameNodeFile.IMAGE, 1L);
fail("TransferImage Should fail with timeout");
} catch (SocketTimeoutException e) {
assertEquals("Upload should timeout", "Read timed out", e.getMessage());
}
} finally {
testServer.stop();
}
}
public static class TestImageTransferServlet extends HttpServlet {
private static final long serialVersionUID = 1L;
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
synchronized (this) {
try {
wait(5000);
} catch (InterruptedException e) {
// Ignore
}
}
}
@Override
protected void doPut(HttpServletRequest req, HttpServletResponse resp)
throws ServletException, IOException {
synchronized (this) {
try {
wait(5000);
} catch (InterruptedException e) {
// Ignore
}
}
}
}
}
| 7,535
| 34.051163
| 87
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetaSave.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedReader;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
* This class tests the creation and validation of metasave
*/
public class TestMetaSave {
static final int NUM_DATA_NODES = 2;
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
private static MiniDFSCluster cluster = null;
private static FileSystem fileSys = null;
private static NamenodeProtocols nnRpc = null;
private void createFile(FileSystem fileSys, Path name) throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) 2, blockSize);
byte[] buffer = new byte[1024];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
@BeforeClass
public static void setUp() throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
// High value of replication interval
// so that blocks remain under-replicated
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1000);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
nnRpc = cluster.getNameNodeRpc();
}
/**
* Tests metasave
*/
@Test
public void testMetaSave() throws IOException, InterruptedException {
for (int i = 0; i < 2; i++) {
Path file = new Path("/filestatus" + i);
createFile(fileSys, file);
}
cluster.stopDataNode(1);
// wait for namenode to discover that a datanode is dead
Thread.sleep(15000);
nnRpc.setReplication("/filestatus0", (short) 4);
nnRpc.metaSave("metasave.out.txt");
// Verification
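// The two 1 KB files each fit in a single 8 KB block, so together with the
// root directory the summary line should report 3 files and directories
// and 2 blocks.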
FileInputStream fstream = new FileInputStream(getLogFile(
"metasave.out.txt"));
DataInputStream in = new DataInputStream(fstream);
BufferedReader reader = null;
try {
reader = new BufferedReader(new InputStreamReader(in));
String line = reader.readLine();
assertTrue(line.equals(
"3 files and directories, 2 blocks = 5 total"));
line = reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
reader.readLine();
line = reader.readLine();
assertTrue(line.matches("^/filestatus[01]:.*"));
} finally {
if (reader != null)
reader.close();
}
}
/**
* Tests metasave after delete, to make sure there are no orphaned blocks
*/
@Test
public void testMetasaveAfterDelete()
throws IOException, InterruptedException {
for (int i = 0; i < 2; i++) {
Path file = new Path("/filestatus" + i);
createFile(fileSys, file);
}
cluster.stopDataNode(1);
// wait for namenode to discover that a datanode is dead
Thread.sleep(15000);
nnRpc.setReplication("/filestatus0", (short) 4);
nnRpc.delete("/filestatus0", true);
nnRpc.delete("/filestatus1", true);
nnRpc.metaSave("metasaveAfterDelete.out.txt");
// Verification
BufferedReader reader = null;
try {
FileInputStream fstream = new FileInputStream(getLogFile(
"metasaveAfterDelete.out.txt"));
DataInputStream in = new DataInputStream(fstream);
reader = new BufferedReader(new InputStreamReader(in));
reader.readLine();
String line = reader.readLine();
assertTrue(line.equals("Live Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Dead Datanodes: 1"));
line = reader.readLine();
assertTrue(line.equals("Metasave: Blocks waiting for replication: 0"));
line = reader.readLine();
assertTrue(line.equals("Mis-replicated blocks that have been postponed:"));
line = reader.readLine();
assertTrue(line.equals("Metasave: Blocks being replicated: 0"));
} finally {
if (reader != null)
reader.close();
}
}
/**
* Tests that metasave overwrites the output file (not append).
*/
@Test
public void testMetaSaveOverwrite() throws Exception {
// metaSave twice.
nnRpc.metaSave("metaSaveOverwrite.out.txt");
nnRpc.metaSave("metaSaveOverwrite.out.txt");
// Read output file.
FileInputStream fis = null;
InputStreamReader isr = null;
BufferedReader rdr = null;
try {
fis = new FileInputStream(getLogFile("metaSaveOverwrite.out.txt"));
isr = new InputStreamReader(fis);
rdr = new BufferedReader(isr);
// Validate that file was overwritten (not appended) by checking for
// presence of only one "Live Datanodes" line.
boolean foundLiveDatanodesLine = false;
String line = rdr.readLine();
while (line != null) {
if (line.startsWith("Live Datanodes")) {
if (foundLiveDatanodesLine) {
fail("multiple Live Datanodes lines, output file not overwritten");
}
foundLiveDatanodesLine = true;
}
line = rdr.readLine();
}
} finally {
IOUtils.cleanup(null, rdr, isr, fis);
}
}
@AfterClass
public static void tearDown() throws IOException {
if (fileSys != null)
fileSys.close();
if (cluster != null)
cluster.shutdown();
}
/**
* Returns a File for the given name inside the log directory.
*
* @param name String file name
* @return File for given name inside log directory
*/
private static File getLogFile(String name) {
return new File(System.getProperty("hadoop.log.dir"), name);
}
}
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestLargeDirectoryDelete.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
/**
 * Ensure that, during a large directory delete, the namenode does not block
 * until the deletion completes and can still handle requests from other
 * clients
 */
public class TestLargeDirectoryDelete {
private static final Log LOG = LogFactory.getLog(TestLargeDirectoryDelete.class);
private static final Configuration CONF = new HdfsConfiguration();
private static final int TOTAL_BLOCKS = 10000;
private MiniDFSCluster mc = null;
private int createOps = 0;
private int lockOps = 0;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
}
/** create a file with a length of <code>filelen</code> */
private void createFile(final String fileName, final long filelen) throws IOException {
FileSystem fs = mc.getFileSystem();
Path filePath = new Path(fileName);
DFSTestUtil.createFile(fs, filePath, filelen, (short) 1, 0);
}
/** Create a large number of directories and files */
private void createFiles() throws IOException {
Random rand = new Random();
// Create files in a directory with random depth
// ranging from 0-10.
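// e.g. with i=300 and a random depth of 2 this produces
// the path /root/300/299/298/file300.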
for (int i = 0; i < TOTAL_BLOCKS; i+=100) {
String filename = "/root/";
int dirs = rand.nextInt(10); // Depth of the directory
for (int j=i; j >=(i-dirs); j--) {
filename += j + "/";
}
filename += "file" + i;
createFile(filename, 100);
}
}
private int getBlockCount() {
Assert.assertNotNull("Null cluster", mc);
Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
FSNamesystem namesystem = mc.getNamesystem();
Assert.assertNotNull("Null Namesystem in cluster", namesystem);
Assert.assertNotNull("Null Namesystem.blockmanager", namesystem.getBlockManager());
return (int) namesystem.getBlocksTotal();
}
/** Run multiple threads doing simultaneous operations on the namenode
* while a large directory is being deleted.
*/
private void runThreads() throws Throwable {
final TestThread threads[] = new TestThread[2];
// Thread for creating files
threads[0] = new TestThread() {
@Override
protected void execute() throws Throwable {
while(live) {
try {
int blockcount = getBlockCount();
if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
String file = "/tmp" + createOps;
createFile(file, 1);
mc.getFileSystem().delete(new Path(file), true);
createOps++;
}
} catch (IOException ex) {
LOG.info("createFile exception ", ex);
break;
}
}
}
};
// Thread that periodically acquires the FSNamesystem lock
threads[1] = new TestThread() {
@Override
protected void execute() throws Throwable {
while(live) {
try {
int blockcount = getBlockCount();
if (blockcount < TOTAL_BLOCKS && blockcount > 0) {
mc.getNamesystem().writeLock();
try {
lockOps++;
} finally {
mc.getNamesystem().writeUnlock();
}
Thread.sleep(1);
}
} catch (InterruptedException ex) {
LOG.info("lockOperation exception ", ex);
break;
}
}
}
};
threads[0].start();
threads[1].start();
final long start = Time.now();
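// Remove blocks one at a time so that the FSNamesystem lock can be
// released between increments, giving the competing threads above a
// chance to run while the large delete is in progress.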
FSNamesystem.BLOCK_DELETION_INCREMENT = 1;
mc.getFileSystem().delete(new Path("/root"), true); // recursive delete
final long end = Time.now();
threads[0].endThread();
threads[1].endThread();
LOG.info("Deletion took " + (end - start) + "msecs");
LOG.info("createOperations " + createOps);
LOG.info("lockOperations " + lockOps);
Assert.assertTrue(lockOps + createOps > 0);
threads[0].rethrow();
threads[1].rethrow();
}
/**
 * An abstract test thread that catches any exception thrown by its body
 * so that it can be rethrown later on a different thread, and that has an
 * {@link #endThread()} operation which flips a volatile boolean before
 * interrupting the thread. After the subclass implementation of
 * {@link #execute()} has run, the thread notifies itself so that other
 * threads can wait for it to terminate.
 */
private abstract class TestThread extends Thread {
volatile Throwable thrown;
protected volatile boolean live = true;
@Override
public void run() {
try {
execute();
} catch (Throwable throwable) {
LOG.warn(throwable);
setThrown(throwable);
} finally {
synchronized (this) {
this.notify();
}
}
}
protected abstract void execute() throws Throwable;
protected synchronized void setThrown(Throwable thrown) {
this.thrown = thrown;
}
/**
* Rethrow anything caught
* @throws Throwable any non-null throwable raised by the execute method.
*/
public synchronized void rethrow() throws Throwable {
if (thrown != null) {
throw thrown;
}
}
/**
 * End the thread by setting the live flag to false, interrupting the
 * thread, and waiting for it to terminate.
 */
public synchronized void endThread() {
live = false;
interrupt();
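// wait() pairs with the notify() in run(): block until execute()
// has finished (normally or by throwing) before returning.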
try {
wait();
} catch (InterruptedException e) {
if(LOG.isDebugEnabled()) {
LOG.debug("Ignoring " + e, e);
}
}
}
}
@Test
public void largeDelete() throws Throwable {
mc = new MiniDFSCluster.Builder(CONF).build();
try {
mc.waitActive();
Assert.assertNotNull("No Namenode in cluster", mc.getNameNode());
createFiles();
Assert.assertEquals(TOTAL_BLOCKS, getBlockCount());
runThreads();
} finally {
mc.shutdown();
}
}
}
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.io.IOException;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.*;
/**
 * Verify that FSNamesystem#commitBlockSynchronization is idempotent.
 */
public class TestCommitBlockSynchronization {
private static final long blockId = 100;
private static final long length = 200;
private static final long genStamp = 300;
private FSNamesystem makeNameSystemSpy(Block block, INodeFile file)
throws IOException {
Configuration conf = new Configuration();
FSEditLog editlog = mock(FSEditLog.class);
FSImage image = new FSImage(conf);
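// Inject the mocked edit log into the FSImage via reflection so that
// no real edit log is written during the test.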
Whitebox.setInternalState(image, "editLog", editlog);
final DatanodeStorageInfo[] targets = {};
FSNamesystem namesystem = new FSNamesystem(conf, image);
namesystem.setImageLoaded(true);
// set file's parent as root and put the file to inodeMap, so
// FSNamesystem's isFileDeleted() method will return false on this file
if (file.getParent() == null) {
INodeDirectory mparent = mock(INodeDirectory.class);
INodeDirectory parent = new INodeDirectory(mparent.getId(), new byte[0],
mparent.getPermissionStatus(), mparent.getAccessTime());
parent.setLocalName(new byte[0]);
parent.addChild(file);
file.setParent(parent);
}
namesystem.dir.getINodeMap().put(file);
FSNamesystem namesystemSpy = spy(namesystem);
BlockInfoContiguousUnderConstruction blockInfo = new BlockInfoContiguousUnderConstruction(
block, (short) 1, HdfsServerConstants.BlockUCState.UNDER_CONSTRUCTION, targets);
blockInfo.setBlockCollection(file);
blockInfo.setGenerationStamp(genStamp);
blockInfo.initializeBlockRecovery(genStamp);
doReturn(blockInfo).when(file).removeLastBlock(any(Block.class));
doReturn(true).when(file).isUnderConstruction();
doReturn(new BlockInfoContiguous[1]).when(file).getBlocks();
doReturn(blockInfo).when(namesystemSpy).getStoredBlock(any(Block.class));
doReturn(blockInfo).when(file).getLastBlock();
doReturn("").when(namesystemSpy).closeFileCommitBlocks(
any(INodeFile.class), any(BlockInfo.class));
doReturn(mock(FSEditLog.class)).when(namesystemSpy).getEditLog();
return namesystemSpy;
}
private INodeFile mockFileUnderConstruction() {
INodeFile file = mock(INodeFile.class);
return file;
}
@Test
public void testCommitBlockSynchronization() throws IOException {
INodeFile file = mockFileUnderConstruction();
Block block = new Block(blockId, length, genStamp);
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[0];
ExtendedBlock lastBlock = new ExtendedBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, false,
false, newTargets, null);
// Repeat the call to make sure it does not throw
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, false, false, newTargets, null);
// Simulate 'completing' the block.
BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
completedBlockInfo.setBlockCollection(file);
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
.getStoredBlock(any(Block.class));
doReturn(completedBlockInfo).when(file).getLastBlock();
// Repeat the call to make sure it does not throw
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, false, false, newTargets, null);
}
@Test
public void testCommitBlockSynchronization2() throws IOException {
INodeFile file = mockFileUnderConstruction();
Block block = new Block(blockId, length, genStamp);
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[0];
ExtendedBlock lastBlock = new ExtendedBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, false,
false, newTargets, null);
// Make sure the call fails if the generation stamp does not match
// the block recovery ID.
try {
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp - 1, length, false, false, newTargets, null);
fail("Failed to get expected IOException on generation stamp/" +
"recovery ID mismatch");
} catch (IOException ioe) {
// Expected exception.
}
}
@Test
public void testCommitBlockSynchronizationWithDelete() throws IOException {
INodeFile file = mockFileUnderConstruction();
Block block = new Block(blockId, length, genStamp);
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[0];
ExtendedBlock lastBlock = new ExtendedBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, false,
true, newTargets, null);
// Simulate removing the last block from the file.
doReturn(null).when(file).removeLastBlock(any(Block.class));
// Repeat the call to make sure it does not throw
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, false, true, newTargets, null);
}
@Test
public void testCommitBlockSynchronizationWithClose() throws IOException {
INodeFile file = mockFileUnderConstruction();
Block block = new Block(blockId, length, genStamp);
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[0];
ExtendedBlock lastBlock = new ExtendedBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true,
false, newTargets, null);
// Repeat the call to make sure it returns true
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true, false, newTargets, null);
BlockInfo completedBlockInfo = new BlockInfoContiguous(block, (short) 1);
completedBlockInfo.setBlockCollection(file);
completedBlockInfo.setGenerationStamp(genStamp);
doReturn(completedBlockInfo).when(namesystemSpy)
.getStoredBlock(any(Block.class));
doReturn(completedBlockInfo).when(file).getLastBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true, false, newTargets, null);
}
@Test
public void testCommitBlockSynchronizationWithCloseAndNonExistentTarget()
throws IOException {
INodeFile file = mockFileUnderConstruction();
Block block = new Block(blockId, length, genStamp);
FSNamesystem namesystemSpy = makeNameSystemSpy(block, file);
DatanodeID[] newTargets = new DatanodeID[]{
new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0, 0)};
ExtendedBlock lastBlock = new ExtendedBlock();
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true,
false, newTargets, null);
// Repeat the call to make sure it returns true
namesystemSpy.commitBlockSynchronization(
lastBlock, genStamp, length, true, false, newTargets, null);
}
}
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.BufferedInputStream;
import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.InputStream;
import java.io.RandomAccessFile;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ChecksumException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.util.XMLUtils.InvalidXmlException;
import org.apache.hadoop.hdfs.util.XMLUtils.Stanza;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.Mockito;
import org.xml.sax.ContentHandler;
import org.xml.sax.SAXException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
* This class tests the creation and validation of a checkpoint.
*/
public class TestEditLog {
static {
((Log4JLogger)FSEditLog.LOG).getLogger().setLevel(Level.ALL);
}
/**
* A garbage mkdir op which is used for testing
* {@link EditLogFileInputStream#scanEditLog(File)}
*/
public static class GarbageMkdirOp extends FSEditLogOp {
public GarbageMkdirOp() {
super(FSEditLogOpCodes.OP_MKDIR);
}
@Override
void resetSubFields() {
// nop
}
@Override
void readFields(DataInputStream in, int logVersion) throws IOException {
throw new IOException("cannot decode GarbageMkdirOp");
}
@Override
public void writeFields(DataOutputStream out) throws IOException {
// write in some garbage content
Random random = new Random();
byte[] content = new byte[random.nextInt(16) + 1];
random.nextBytes(content);
out.write(content);
}
@Override
protected void toXml(ContentHandler contentHandler) throws SAXException {
throw new UnsupportedOperationException(
"Not supported for GarbageMkdirOp");
}
@Override
void fromXml(Stanza st) throws InvalidXmlException {
throw new UnsupportedOperationException(
"Not supported for GarbageMkdirOp");
}
}
static final Log LOG = LogFactory.getLog(TestEditLog.class);
static final int NUM_DATA_NODES = 0;
// This test creates NUM_THREADS threads and each thread does
// 2 * NUM_TRANSACTIONS Transactions concurrently.
static final int NUM_TRANSACTIONS = 100;
static final int NUM_THREADS = 100;
static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class);
/** An edits log with 3 edits from 0.20 - the result of
* a fresh namesystem followed by hadoop fs -touchz /myfile */
static final byte[] HADOOP20_SOME_EDITS =
StringUtils.hexStringToByte((
"ffff ffed 0a00 0000 0000 03fa e100 0000" +
"0005 0007 2f6d 7966 696c 6500 0133 000d" +
"3132 3932 3331 3634 3034 3138 3400 0d31" +
"3239 3233 3136 3430 3431 3834 0009 3133" +
"3432 3137 3732 3800 0000 0004 746f 6464" +
"0a73 7570 6572 6772 6f75 7001 a400 1544" +
"4653 436c 6965 6e74 5f2d 3136 3136 3535" +
"3738 3931 000b 3137 322e 3239 2e35 2e33" +
"3209 0000 0005 0007 2f6d 7966 696c 6500" +
"0133 000d 3132 3932 3331 3634 3034 3138" +
"3400 0d31 3239 3233 3136 3430 3431 3834" +
"0009 3133 3432 3137 3732 3800 0000 0004" +
"746f 6464 0a73 7570 6572 6772 6f75 7001" +
"a4ff 0000 0000 0000 0000 0000 0000 0000"
).replace(" ",""));
static {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
static final byte TRAILER_BYTE = FSEditLogOpCodes.OP_INVALID.getOpCode();
private static final int CHECKPOINT_ON_STARTUP_MIN_TXNS = 100;
//
// an object that does a bunch of transactions
//
static class Transactions implements Runnable {
final FSNamesystem namesystem;
final int numTransactions;
final short replication = 3;
final long blockSize = 64;
final int startIndex;
Transactions(FSNamesystem ns, int numTx, int startIdx) {
namesystem = ns;
numTransactions = numTx;
startIndex = startIdx;
}
// add a bunch of transactions.
@Override
public void run() {
PermissionStatus p = namesystem.createFsOwnerPermissions(
new FsPermission((short)0777));
FSEditLog editLog = namesystem.getEditLog();
for (int i = 0; i < numTransactions; i++) {
INodeFile inode = new INodeFile(namesystem.dir.allocateNewInodeId(), null,
p, 0L, 0L, BlockInfo.EMPTY_ARRAY, replication, blockSize);
inode.toUnderConstruction("", "");
editLog.logOpenFile("/filename" + (startIndex + i), inode, false, false);
editLog.logCloseFile("/filename" + (startIndex + i), inode);
editLog.logSync();
}
}
}
/**
* Construct FSEditLog with default configuration, taking editDirs from NNStorage
*
* @param storage Storage object used by namenode
*/
private static FSEditLog getFSEditLog(NNStorage storage) throws IOException {
Configuration conf = new Configuration();
// Make sure the edits dirs are set in the provided configuration object.
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
StringUtils.join(",", storage.getEditsDirectories()));
FSEditLog log = new FSEditLog(conf, storage, FSNamesystem.getNamespaceEditsDirs(conf));
return log;
}
/**
* Test case for an empty edit log from a prior version of Hadoop.
*/
@Test
public void testPreTxIdEditLogNoEdits() throws Exception {
FSNamesystem namesys = Mockito.mock(FSNamesystem.class);
namesys.dir = Mockito.mock(FSDirectory.class);
long numEdits = testLoad(
StringUtils.hexStringToByte("ffffffed"), // just version number
namesys);
assertEquals(0, numEdits);
}
/**
* Test case for loading a very simple edit log from a format
* prior to the inclusion of edit transaction IDs in the log.
*/
@Test
public void testPreTxidEditLogWithEdits() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
long numEdits = testLoad(HADOOP20_SOME_EDITS, namesystem);
assertEquals(3, numEdits);
// Sanity check the edit
HdfsFileStatus fileInfo = namesystem.getFileInfo("/myfile", false);
assertEquals("supergroup", fileInfo.getGroup());
assertEquals(3, fileInfo.getReplication());
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
private long testLoad(byte[] data, FSNamesystem namesys) throws IOException {
FSEditLogLoader loader = new FSEditLogLoader(namesys, 0);
return loader.loadFSEdits(new EditLogByteInputStream(data), 1);
}
/**
* Simple test for writing to and rolling the edit log.
*/
@Test
public void testSimpleEditLog() throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getInProgressEditsFileName(1));
editLog.logSetReplication("fakefile", (short) 1);
editLog.logSync();
editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getFinalizedEditsFileName(1,3));
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getInProgressEditsFileName(4));
editLog.logSetReplication("fakefile", (short) 2);
editLog.logSync();
editLog.close();
} finally {
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
/**
* Tests transaction logging in dfs.
*/
@Test
public void testMultiThreadedEditLog() throws IOException {
testEditLog(2048);
// force edit buffer to automatically sync on each log of edit log entry
testEditLog(1);
}
private void assertExistsInStorageDirs(MiniDFSCluster cluster,
NameNodeDirType dirType,
String filename) {
NNStorage storage = cluster.getNamesystem().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(dirType)) {
File f = new File(sd.getCurrentDir(), filename);
assertTrue("Expect that " + f + " exists", f.exists());
}
}
/**
* Test edit log with different initial buffer size
*
* @param initialSize initial edit log buffer size
* @throws IOException
*/
private void testEditLog(int initialSize) throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
File dir = new File(it.next().getPath());
System.out.println(dir);
}
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
// set small size of flush buffer
editLog.setOutputBufferCapacity(initialSize);
// Roll log so new output buffer size takes effect
// we should now be writing to edits_inprogress_3
fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
// Remember the current lastInodeId so it can be reset later to test
// loading editlog segments. The transactions below allocate new inode
// ids to write to the editlogs but don't create inodes in the namespace.
long originalLastInodeId = namesystem.dir.getLastInodeId();
// Create threads and make them run transactions concurrently.
Thread threadId[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
Transactions trans =
new Transactions(namesystem, NUM_TRANSACTIONS, i*NUM_TRANSACTIONS);
threadId[i] = new Thread(trans, "TransactionThread-" + i);
threadId[i].start();
}
// wait for all transactions to get over
for (int i = 0; i < NUM_THREADS; i++) {
try {
threadId[i].join();
} catch (InterruptedException e) {
i--; // retry
}
}
// Reopen some files as for append
Transactions trans =
new Transactions(namesystem, NUM_TRANSACTIONS, NUM_TRANSACTIONS / 2);
trans.run();
// Roll another time to finalize edits_inprogress_3
fsimage.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
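// Each Transactions iteration logs an open and a close (2 txns); the
// NUM_THREADS concurrent runs plus the one sequential rerun give
// (NUM_THREADS + 1) * 2 * NUM_TRANSACTIONS edits in the segment.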
long expectedTxns = ((NUM_THREADS+1) * 2 * NUM_TRANSACTIONS) + 2; // +2 for start/end txns
// Verify that we can read in all the transactions that we have written.
// If there were any corruptions, it is likely that the reading in
// of these transactions will throw an exception.
//
namesystem.dir.resetLastInodeIdWithoutChecking(originalLastInodeId);
for (Iterator<StorageDirectory> it =
fsimage.getStorage().dirIterator(NameNodeDirType.EDITS); it.hasNext();) {
FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
File editFile = NNStorage.getFinalizedEditsFile(it.next(), 3,
3 + expectedTxns - 1);
assertTrue("Expect " + editFile + " exists", editFile.exists());
System.out.println("Verifying file: " + editFile);
long numEdits = loader.loadFSEdits(
new EditLogFileInputStream(editFile), 3);
int numLeases = namesystem.leaseManager.countLease();
System.out.println("Number of outstanding leases " + numLeases);
assertEquals(0, numLeases);
assertTrue("Verification for " + editFile + " failed. " +
"Expected " + expectedTxns + " transactions. "+
"Found " + numEdits + " transactions.",
numEdits == expectedTxns);
}
} finally {
try {
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
} catch (Throwable t) {
LOG.error("Couldn't shut down cleanly", t);
}
}
}
private void doLogEdit(ExecutorService exec, final FSEditLog log,
final String filename) throws Exception
{
exec.submit(new Callable<Void>() {
@Override
public Void call() {
log.logSetReplication(filename, (short)1);
return null;
}
}).get();
}
private void doCallLogSync(ExecutorService exec, final FSEditLog log)
throws Exception
{
exec.submit(new Callable<Void>() {
@Override
public Void call() {
log.logSync();
return null;
}
}).get();
}
private void doCallLogSyncAll(ExecutorService exec, final FSEditLog log)
throws Exception
{
exec.submit(new Callable<Void>() {
@Override
public Void call() throws Exception {
log.logSyncAll();
return null;
}
}).get();
}
@Test
public void testSyncBatching() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
ExecutorService threadA = Executors.newSingleThreadExecutor();
ExecutorService threadB = Executors.newSingleThreadExecutor();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
assertEquals("should start with only the BEGIN_LOG_SEGMENT txn synced",
1, editLog.getSyncTxId());
// Log an edit from thread A
doLogEdit(threadA, editLog, "thread-a 1");
assertEquals("logging edit without syncing should do not affect txid",
1, editLog.getSyncTxId());
// Log an edit from thread B
doLogEdit(threadB, editLog, "thread-b 1");
assertEquals("logging edit without syncing should do not affect txid",
1, editLog.getSyncTxId());
// Now ask to sync edit from B, which should sync both edits.
doCallLogSync(threadB, editLog);
assertEquals("logSync from second thread should bump txid up to 3",
3, editLog.getSyncTxId());
// Now ask to sync edit from A, which was already batched in - thus
// it should increment the batch count metric
doCallLogSync(threadA, editLog);
assertEquals("logSync from first thread shouldn't change txid",
3, editLog.getSyncTxId());
//Should have incremented the batch count exactly once
assertCounter("TransactionsBatchedInSync", 1L,
getMetrics("NameNodeActivity"));
} finally {
threadA.shutdown();
threadB.shutdown();
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
/**
* Test what happens with the following sequence:
*
* Thread A writes edit
* Thread B calls logSyncAll
* calls close() on stream
* Thread A calls logSync
*
* This sequence is legal and can occur if enterSafeMode() is closely
* followed by saveNamespace.
*/
@Test
public void testBatchedSyncWithClosedLogs() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
ExecutorService threadA = Executors.newSingleThreadExecutor();
ExecutorService threadB = Executors.newSingleThreadExecutor();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
// Log an edit from thread A
doLogEdit(threadA, editLog, "thread-a 1");
assertEquals("logging edit without syncing should do not affect txid",
1, editLog.getSyncTxId());
// logSyncAll in Thread B
doCallLogSyncAll(threadB, editLog);
assertEquals("logSyncAll should sync thread A's transaction",
2, editLog.getSyncTxId());
// Close edit log
editLog.close();
// Ask thread A to finish sync (which should be a no-op)
doCallLogSync(threadA, editLog);
} finally {
threadA.shutdown();
threadB.shutdown();
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
@Test
public void testEditChecksum() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
fileSys.mkdirs(new Path("/tmp"));
Iterator<StorageDirectory> iter = fsimage.getStorage().
dirIterator(NameNodeDirType.EDITS);
LinkedList<StorageDirectory> sds = new LinkedList<StorageDirectory>();
while (iter.hasNext()) {
sds.add(iter.next());
}
editLog.close();
cluster.shutdown();
for (StorageDirectory sd : sds) {
File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 3);
assertTrue(editFile.exists());
long fileLen = editFile.length();
LOG.debug("Corrupting Log File: " + editFile + " len: " + fileLen);
RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
rwf.seek(fileLen-4); // seek to checksum bytes
int b = rwf.readInt();
rwf.seek(fileLen-4);
rwf.writeInt(b+1);
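// Incrementing the stored checksum guarantees a mismatch, so the next
// load of this log must fail with a ChecksumException.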
rwf.close();
}
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).format(false).build();
fail("should not be able to start");
} catch (IOException e) {
// expected
assertNotNull("Cause of exception should be ChecksumException", e.getCause());
assertEquals("Cause of exception should be ChecksumException",
ChecksumException.class, e.getCause().getClass());
}
}
/**
* Test what happens if the NN crashes when it has started but
* had no transactions written.
*/
@Test
public void testCrashRecoveryNoTransactions() throws Exception {
testCrashRecovery(0);
}
/**
* Test what happens if the NN crashes when it has started and
* had a few transactions written
*/
@Test
public void testCrashRecoveryWithTransactions() throws Exception {
testCrashRecovery(150);
}
/**
* Do a test to make sure the edit log can recover edits even after
* a non-clean shutdown. This does a simulated crash by copying over
* the edits directory while the NN is still running, then shutting it
* down, and restoring that edits directory.
*/
private void testCrashRecovery(int numTransactions) throws Exception {
MiniDFSCluster cluster = null;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
CHECKPOINT_ON_STARTUP_MIN_TXNS);
try {
LOG.info("\n===========================================\n" +
"Starting empty cluster");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(true)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
for (int i = 0; i < numTransactions; i++) {
fs.mkdirs(new Path("/test" + i));
}
// Directory layout looks like:
// test/data/dfs/nameN/current/{fsimage_N,edits_...}
File nameDir = new File(cluster.getNameDirs(0).iterator().next().getPath());
File dfsDir = nameDir.getParentFile();
assertEquals(dfsDir.getName(), "dfs"); // make sure we got right dir
LOG.info("Copying data directory aside to a hot backup");
File backupDir = new File(dfsDir.getParentFile(), "dfs.backup-while-running");
FileUtils.copyDirectory(dfsDir, backupDir);
LOG.info("Shutting down cluster #1");
cluster.shutdown();
cluster = null;
// Now restore the backup
FileUtil.fullyDeleteContents(dfsDir);
dfsDir.delete();
backupDir.renameTo(dfsDir);
// Directory layout looks like:
// test/data/dfs/nameN/current/{fsimage_N,edits_...}
File currentDir = new File(nameDir, "current");
// We should see the file as in-progress
File editsFile = new File(currentDir,
NNStorage.getInProgressEditsFileName(1));
assertTrue("Edits file " + editsFile + " should exist", editsFile.exists());
File imageFile = FSImageTestUtil.findNewestImageFile(
currentDir.getAbsolutePath());
assertNotNull("No image found in " + nameDir, imageFile);
assertEquals(NNStorage.getImageFileName(0), imageFile.getName());
// Try to start a new cluster
LOG.info("\n===========================================\n" +
"Starting same cluster after simulated crash");
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.build();
cluster.waitActive();
// We should still have the files we wrote prior to the simulated crash
fs = cluster.getFileSystem();
for (int i = 0; i < numTransactions; i++) {
assertTrue(fs.exists(new Path("/test" + i)));
}
long expectedTxId;
if (numTransactions > CHECKPOINT_ON_STARTUP_MIN_TXNS) {
// It should have saved a checkpoint on startup since there
// were more unfinalized edits than configured
expectedTxId = numTransactions + 1;
} else {
// otherwise, it shouldn't have made a checkpoint
expectedTxId = 0;
}
imageFile = FSImageTestUtil.findNewestImageFile(
currentDir.getAbsolutePath());
assertNotNull("No image found in " + nameDir, imageFile);
assertEquals(NNStorage.getImageFileName(expectedTxId),
imageFile.getName());
// Started successfully. Shut it down and make sure it can restart.
cluster.shutdown();
cluster = null;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.build();
cluster.waitActive();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
// should succeed - only one corrupt log dir
@Test
public void testCrashRecoveryEmptyLogOneDir() throws Exception {
doTestCrashRecoveryEmptyLog(false, true, true);
}
// should fail - seen_txid updated to 3, but no log dir contains txid 3
@Test
public void testCrashRecoveryEmptyLogBothDirs() throws Exception {
doTestCrashRecoveryEmptyLog(true, true, false);
}
// should succeed - only one corrupt log dir
@Test
public void testCrashRecoveryEmptyLogOneDirNoUpdateSeenTxId()
throws Exception {
doTestCrashRecoveryEmptyLog(false, false, true);
}
// should succeed - both log dirs corrupt, but seen_txid never updated
@Test
public void testCrashRecoveryEmptyLogBothDirsNoUpdateSeenTxId()
throws Exception {
doTestCrashRecoveryEmptyLog(true, false, true);
}
/**
* Test that the NN handles the corruption properly
* after it crashes just after creating an edit log
* (ie before writing START_LOG_SEGMENT). In the case
* that all logs have this problem, it should mark them
* as corrupt instead of trying to finalize them.
*
* @param inBothDirs if true, there will be a truncated log in
* both of the edits directories. If false, the truncated log
* will only be in one of the directories. The NN should fail to
* start up only if it knows (via the seen_txid file) that txid 3
* was reached but cannot find a non-corrupt log starting there.
* @param updateTransactionIdFile if true update the seen_txid file.
* If false, it will not be updated. This will simulate a case where
* the NN crashed between creating the new segment and updating the
* seen_txid file.
* @param shouldSucceed true if the test is expected to succeed.
*/
private void doTestCrashRecoveryEmptyLog(boolean inBothDirs,
boolean updateTransactionIdFile, boolean shouldSucceed)
throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES).build();
cluster.shutdown();
Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
for (URI uri : editsDirs) {
File dir = new File(uri.getPath());
File currentDir = new File(dir, "current");
// We should start with only the finalized edits_1-2
GenericTestUtils.assertGlobEquals(currentDir, "edits_.*",
NNStorage.getFinalizedEditsFileName(1, 2));
// Make a truncated edits_3_inprogress
File log = new File(currentDir,
NNStorage.getInProgressEditsFileName(3));
EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, log, 1024);
try {
stream.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
if (!inBothDirs) {
break;
}
NNStorage storage = new NNStorage(conf,
Collections.<URI>emptyList(),
Lists.newArrayList(uri));
if (updateTransactionIdFile) {
storage.writeTransactionIdFileToStorage(3);
}
storage.close();
} finally {
stream.close();
}
}
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES).format(false).build();
if (!shouldSucceed) {
fail("Should not have succeeded in startin cluster");
}
} catch (IOException ioe) {
if (shouldSucceed) {
LOG.info("Should have succeeded in starting cluster, but failed", ioe);
throw ioe;
} else {
GenericTestUtils.assertExceptionContains(
"Gap in transactions. Expected to be able to read up until " +
"at least txid 3 but unable to find any edit logs containing " +
"txid 3", ioe);
}
} finally {
cluster.shutdown();
}
}
private static class EditLogByteInputStream extends EditLogInputStream {
private final InputStream input;
private final long len;
private int version;
private FSEditLogOp.Reader reader = null;
private FSEditLogLoader.PositionTrackingInputStream tracker = null;
public EditLogByteInputStream(byte[] data) throws IOException {
len = data.length;
input = new ByteArrayInputStream(data);
BufferedInputStream bin = new BufferedInputStream(input);
DataInputStream in = new DataInputStream(bin);
version = EditLogFileInputStream.readLogVersion(in, true);
tracker = new FSEditLogLoader.PositionTrackingInputStream(in);
in = new DataInputStream(tracker);
reader = new FSEditLogOp.Reader(in, tracker, version);
}
@Override
public long getFirstTxId() {
return HdfsServerConstants.INVALID_TXID;
}
@Override
public long getLastTxId() {
return HdfsServerConstants.INVALID_TXID;
}
@Override
public long length() throws IOException {
return len;
}
@Override
public long getPosition() {
return tracker.getPos();
}
@Override
protected FSEditLogOp nextOp() throws IOException {
return reader.readOp(false);
}
@Override
public int getVersion(boolean verifyVersion) throws IOException {
return version;
}
@Override
public void close() throws IOException {
input.close();
}
@Override
public String getName() {
return "AnonEditLogByteInputStream";
}
@Override
public boolean isInProgress() {
return true;
}
@Override
public void setMaxOpSize(int maxOpSize) {
reader.setMaxOpSize(maxOpSize);
}
@Override public boolean isLocalLog() {
return true;
}
}
@Test
public void testFailedOpen() throws Exception {
File logDir = new File(TEST_DIR, "testFailedOpen");
logDir.mkdirs();
FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
try {
FileUtil.setWritable(logDir, false);
log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
fail("Did no throw exception on only having a bad dir");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"too few journals successfully started", ioe);
} finally {
FileUtil.setWritable(logDir, true);
log.close();
}
}
/**
* Regression test for HDFS-1112/HDFS-3020. Ensures that, even if
* logSync isn't called periodically, the edit log will sync itself.
*/
@Test
public void testAutoSync() throws Exception {
File logDir = new File(TEST_DIR, "testAutoSync");
logDir.mkdirs();
FSEditLog log = FSImageTestUtil.createStandaloneEditLog(logDir);
String oneKB = StringUtils.byteToHexString(
new byte[500]);
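// byteToHexString emits two characters per byte, so 500 zero bytes
// become a 1000-character (~1KB) path string.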
try {
log.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
NameNodeMetrics mockMetrics = Mockito.mock(NameNodeMetrics.class);
log.setMetricsForTests(mockMetrics);
for (int i = 0; i < 400; i++) {
log.logDelete(oneKB, 1L, false);
}
// After ~400KB, we're still within the 512KB buffer size
Mockito.verify(mockMetrics, Mockito.times(0)).addSync(Mockito.anyLong());
// After ~400KB more, we should have done an automatic sync
for (int i = 0; i < 400; i++) {
log.logDelete(oneKB, 1L, false);
}
Mockito.verify(mockMetrics, Mockito.times(1)).addSync(Mockito.anyLong());
} finally {
log.close();
}
}
/**
* Tests the getEditLogManifest function using mock storage for a number
* of different situations.
*/
@Test
public void testEditLogManifestMocks() throws IOException {
NNStorage storage;
FSEditLog log;
// Simple case - different directories have the same
// set of logs, with an in-progress one at end
storage = mockStorageWithEdits(
"[1,100]|[101,200]|[201,]",
"[1,100]|[101,200]|[201,]");
log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",
log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",
log.getEditLogManifest(101).toString());
// Another simple case, different directories have different
// sets of files
storage = mockStorageWithEdits(
"[1,100]|[101,200]",
"[1,100]|[201,300]|[301,400]"); // nothing starting at 101
log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200], [201,300], [301,400]]",
log.getEditLogManifest(1).toString());
// Case where one directory has an earlier finalized log, followed
// by a gap. The returned manifest should start after the gap.
storage = mockStorageWithEdits(
"[1,100]|[301,400]", // gap from 101 to 300
"[301,400]|[401,500]");
log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[301,400], [401,500]]",
log.getEditLogManifest(1).toString());
// Case where different directories have different length logs
// starting at the same txid - should pick the longer one
storage = mockStorageWithEdits(
"[1,100]|[101,150]", // short log at 101
"[1,50]|[101,200]"); // short log at 1
log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",
log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",
log.getEditLogManifest(101).toString());
// Case where the first storage has an inprogress while
// the second has finalised that file (i.e. the first failed
// recently)
storage = mockStorageWithEdits(
"[1,100]|[101,]",
"[1,100]|[101,200]");
log = getFSEditLog(storage);
log.initJournalsForWrite();
assertEquals("[[1,100], [101,200]]",
log.getEditLogManifest(1).toString());
assertEquals("[[101,200]]",
log.getEditLogManifest(101).toString());
}
/**
* Create a mock NNStorage object with several directories, each directory
* holding edit logs according to a specification. Each directory
* is specified by a pipe-separated string. For example:
* <code>[1,100]|[101,200]</code> specifies a directory which
* includes two finalized segments, one from 1-100, and one from 101-200.
* The syntax <code>[1,]</code> specifies an in-progress log starting at
* txid 1.
*/
private NNStorage mockStorageWithEdits(String... editsDirSpecs) throws IOException {
List<StorageDirectory> sds = Lists.newArrayList();
List<URI> uris = Lists.newArrayList();
NNStorage storage = Mockito.mock(NNStorage.class);
for (String dirSpec : editsDirSpecs) {
List<String> files = Lists.newArrayList();
String[] logSpecs = dirSpec.split("\\|");
for (String logSpec : logSpecs) {
Matcher m = Pattern.compile("\\[(\\d+),(\\d+)?\\]").matcher(logSpec);
assertTrue("bad spec: " + logSpec, m.matches());
if (m.group(2) == null) {
files.add(NNStorage.getInProgressEditsFileName(
Long.parseLong(m.group(1))));
} else {
files.add(NNStorage.getFinalizedEditsFileName(
Long.parseLong(m.group(1)),
Long.parseLong(m.group(2))));
}
}
StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
NameNodeDirType.EDITS, false,
files.toArray(new String[0]));
sds.add(sd);
URI u = URI.create("file:///storage"+ Math.random());
Mockito.doReturn(sd).when(storage).getStorageDirectory(u);
uris.add(u);
}
Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
Mockito.doReturn(uris).when(storage).getEditsDirectories();
return storage;
}
/**
* Specification for a failure during #setupEdits
*/
static class AbortSpec {
final int roll;
final int logindex;
/**
* Construct the failure specification.
* @param roll number to fail after. e.g. 1 to fail after the first roll
* @param logindex index of journal to fail.
*/
AbortSpec(int roll, int logindex) {
this.roll = roll;
this.logindex = logindex;
}
}
final static int TXNS_PER_ROLL = 10;
final static int TXNS_PER_FAIL = 2;
/**
* Set up directories for tests.
*
* Each rolled file is 10 txns long.
* A failed file is 2 txns long.
*
* @param editUris directories to create edit logs in
* @param numrolls number of times to roll the edit log during setup
* @param closeOnFinish whether to close the edit log after setup
* @param abortAtRolls Specifications for when to fail, see AbortSpec
*/
public static NNStorage setupEdits(List<URI> editUris, int numrolls,
boolean closeOnFinish, AbortSpec... abortAtRolls) throws IOException {
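// For example (hypothetical call): setupEdits(uris, 2, new AbortSpec(1, 0))
// rolls the log twice and aborts journal 0 just after the first roll, so
// journal 0 ends up with a short 2-txn aborted segment while the other
// journals carry the full 10-txn segments.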
List<AbortSpec> aborts = new ArrayList<AbortSpec>(Arrays.asList(abortAtRolls));
NNStorage storage = new NNStorage(new Configuration(),
Collections.<URI>emptyList(),
editUris);
storage.format(new NamespaceInfo());
FSEditLog editlog = getFSEditLog(storage);
// Open the edit log (BEGIN_LOG_SEGMENT is txid 1) and pad the first
// segment so that, together with the END_LOG_SEGMENT marker written by
// the first roll, it contains exactly TXNS_PER_ROLL transactions.
// logGenerationStamp is used simply because it doesn't require
// complex arguments.
editlog.initJournalsForWrite();
editlog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
for (int i = 2; i < TXNS_PER_ROLL; i++) {
editlog.logGenerationStampV2((long) 0);
}
editlog.logSync();
// Go into edit log rolling loop.
// On each roll, the abortAtRolls abort specs are
// checked to see if an abort is required. If so, the
// specified journal is aborted. It will be brought
// back into rotation automatically by rollEditLog
for (int i = 0; i < numrolls; i++) {
editlog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
editlog.logGenerationStampV2((long) i);
editlog.logSync();
while (aborts.size() > 0
&& aborts.get(0).roll == (i+1)) {
AbortSpec spec = aborts.remove(0);
editlog.getJournals().get(spec.logindex).abort();
}
for (int j = 3; j < TXNS_PER_ROLL; j++) {
editlog.logGenerationStampV2((long) i);
}
editlog.logSync();
}
if (closeOnFinish) {
editlog.close();
}
FSImageTestUtil.logStorageContents(LOG, storage);
return storage;
}
/**
* Set up directories for tests.
*
* Each rolled file is 10 txns long.
* A failed file is 2 txns long.
*
* @param editUris directories to create edit logs in
* @param numrolls number of times to roll the edit log during setup
* @param abortAtRolls Specifications for when to fail, see AbortSpec
*/
public static NNStorage setupEdits(List<URI> editUris, int numrolls,
AbortSpec... abortAtRolls) throws IOException {
return setupEdits(editUris, numrolls, true, abortAtRolls);
}
/**
* Test loading an editlog which has had both its storage fail
* on alternating rolls. Two edit log directories are created.
* The first one fails on odd rolls, the second on even. Test
* that we are able to load the entire editlog regardless.
*/
@Test
public void testAlternatingJournalFailure() throws IOException {
File f1 = new File(TEST_DIR + "/alternatingjournaltest0");
File f2 = new File(TEST_DIR + "/alternatingjournaltest1");
List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
NNStorage storage = setupEdits(editUris, 10,
new AbortSpec(1, 0),
new AbortSpec(2, 1),
new AbortSpec(3, 0),
new AbortSpec(4, 1),
new AbortSpec(5, 0),
new AbortSpec(6, 1),
new AbortSpec(7, 0),
new AbortSpec(8, 1),
new AbortSpec(9, 0),
new AbortSpec(10, 1));
long totaltxnread = 0;
FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
Iterable<EditLogInputStream> editStreams = editlog.selectInputStreams(startTxId,
TXNS_PER_ROLL*11);
for (EditLogInputStream edits : editStreams) {
FSEditLogLoader.EditLogValidation val = FSEditLogLoader.validateEditLog(edits);
long read = (val.getEndTxId() - edits.getFirstTxId()) + 1;
LOG.info("Loading edits " + edits + " read " + read);
assertEquals(startTxId, edits.getFirstTxId());
startTxId += read;
totaltxnread += read;
}
editlog.close();
storage.close();
assertEquals(TXNS_PER_ROLL*11, totaltxnread);
}
/**
* Test loading an editlog with gaps. A single editlog directory
* is set up. One of the edit log files is deleted. This should
* fail when selecting the input streams as it will not be able
* to select enough streams to load up to 4*TXNS_PER_ROLL.
* There should be 4*TXNS_PER_ROLL transactions as we rolled 3
* times.
*/
@Test
public void testLoadingWithGaps() throws IOException {
File f1 = new File(TEST_DIR + "/gaptest0");
List<URI> editUris = ImmutableList.of(f1.toURI());
NNStorage storage = setupEdits(editUris, 3);
final long startGapTxId = 1*TXNS_PER_ROLL + 1;
final long endGapTxId = 2*TXNS_PER_ROLL;
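// With TXNS_PER_ROLL = 10 this targets the finalized segment
// edits_11-20; deleting it leaves a gap starting at txid 11.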
File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId,
endGapTxId))) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
assertTrue(files[0].delete());
FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
try {
editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
fail("Should have thrown exception");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Gap in transactions. Expected to be able to read up until " +
"at least txid 40 but unable to find any edit logs containing " +
"txid 11", ioe);
}
}
/**
* Test that we can read from a byte stream without crashing.
*
*/
static void validateNoCrash(byte garbage[]) throws IOException {
final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
EditLogFileOutputStream elfos = null;
EditLogFileInputStream elfis = null;
try {
elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
elfos.writeRaw(garbage, 0, garbage.length);
elfos.setReadyToFlush();
elfos.flushAndSync(true);
elfos.close();
elfos = null;
elfis = new EditLogFileInputStream(TEST_LOG_NAME);
// verify that we can read everything without killing the JVM or
// throwing an exception other than IOException
try {
while (true) {
FSEditLogOp op = elfis.readOp();
if (op == null)
break;
}
} catch (IOException e) {
} catch (Throwable t) {
fail("Caught non-IOException throwable " +
StringUtils.stringifyException(t));
}
} finally {
if ((elfos != null) && (elfos.isOpen()))
elfos.close();
if (elfis != null)
elfis.close();
}
}
static byte[][] invalidSequences = null;
/**
* "Fuzz" test for the edit log.
*
* This tests that we can read random garbage from the edit log without
* crashing the JVM or throwing an unchecked exception.
*/
@Test
public void testFuzzSequences() throws IOException {
final int MAX_GARBAGE_LENGTH = 512;
final int MAX_INVALID_SEQ = 5000;
// The seed to use for our random number generator. When given the same
// seed, Java.util.Random will always produce the same sequence of values.
// This is important because it means that the test is deterministic and
// repeatable on any machine.
final int RANDOM_SEED = 123;
Random r = new Random(RANDOM_SEED);
for (int i = 0; i < MAX_INVALID_SEQ; i++) {
byte[] garbage = new byte[r.nextInt(MAX_GARBAGE_LENGTH)];
r.nextBytes(garbage);
validateNoCrash(garbage);
}
}
private static long readAllEdits(Collection<EditLogInputStream> streams,
long startTxId) throws IOException {
FSEditLogOp op;
long nextTxId = startTxId;
long numTx = 0;
for (EditLogInputStream s : streams) {
while (true) {
op = s.readOp();
if (op == null)
break;
if (op.getTransactionId() != nextTxId) {
throw new IOException("out of order transaction ID! expected " +
nextTxId + " but got " + op.getTransactionId() + " when " +
"reading " + s.getName());
}
numTx++;
nextTxId = op.getTransactionId() + 1;
}
}
return numTx;
}
/**
   * Test edit log failover. If a single edit log is missing, other
   * edit logs should be used instead.
*/
@Test
public void testEditLogFailOverFromMissing() throws IOException {
File f1 = new File(TEST_DIR + "/failover0");
File f2 = new File(TEST_DIR + "/failover1");
List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
NNStorage storage = setupEdits(editUris, 3);
final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
final long endErrorTxId = 2*TXNS_PER_ROLL;
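    // Delete f1's copy of the middle segment; the identical copy under f2
    // should be selected transparently.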
File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,
endErrorTxId))) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
assertTrue(files[0].delete());
FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
Collection<EditLogInputStream> streams = null;
try {
streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
readAllEdits(streams, startTxId);
} catch (IOException e) {
LOG.error("edit log failover didn't work", e);
fail("Edit log failover didn't work");
    } finally {
      if (streams != null) {
        IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
      }
    }
}
/**
* Test edit log failover from a corrupt edit log
*/
@Test
public void testEditLogFailOverFromCorrupt() throws IOException {
File f1 = new File(TEST_DIR + "/failover0");
File f2 = new File(TEST_DIR + "/failover1");
List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI());
NNStorage storage = setupEdits(editUris, 3);
final long startErrorTxId = 1*TXNS_PER_ROLL + 1;
final long endErrorTxId = 2*TXNS_PER_ROLL;
File[] files = new File(f1, "current").listFiles(new FilenameFilter() {
      @Override
      public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startErrorTxId,
endErrorTxId))) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
long fileLen = files[0].length();
LOG.debug("Corrupting Log File: " + files[0] + " len: " + fileLen);
RandomAccessFile rwf = new RandomAccessFile(files[0], "rw");
rwf.seek(fileLen-4); // seek to checksum bytes
int b = rwf.readInt();
rwf.seek(fileLen-4);
rwf.writeInt(b+1);
rwf.close();
FSEditLog editlog = getFSEditLog(storage);
editlog.initJournalsForWrite();
long startTxId = 1;
Collection<EditLogInputStream> streams = null;
try {
streams = editlog.selectInputStreams(startTxId, 4*TXNS_PER_ROLL);
readAllEdits(streams, startTxId);
} catch (IOException e) {
LOG.error("edit log failover didn't work", e);
fail("Edit log failover didn't work");
    } finally {
      if (streams != null) {
        IOUtils.cleanup(null, streams.toArray(new EditLogInputStream[0]));
      }
    }
}
/**
* Test creating a directory with lots and lots of edit log segments
*/
@Test
public void testManyEditLogSegments() throws IOException {
final int NUM_EDIT_LOG_ROLLS = 1000;
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
final FSEditLog editLog = fsimage.getEditLog();
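      // Each iteration logs one op and then rolls, so every finalized
      // segment spans exactly 3 txids (segment-start marker, the
      // setReplication op, segment-end marker); hence the (i * 3)
      // arithmetic in the expected file names below.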
for (int i = 0; i < NUM_EDIT_LOG_ROLLS; i++){
editLog.logSetReplication("fakefile" + i, (short)(i % 3));
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getInProgressEditsFileName((i * 3) + 1));
editLog.logSync();
editLog.rollEditLog(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertExistsInStorageDirs(
cluster, NameNodeDirType.EDITS,
NNStorage.getFinalizedEditsFileName((i * 3) + 1, (i * 3) + 3));
}
editLog.close();
} finally {
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
// How long does it take to read through all these edit logs?
long startTime = Time.now();
try {
cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
long endTime = Time.now();
    double delta = (endTime - startTime) / 1000.0;
LOG.info(String.format("loaded %d edit log segments in %.2f seconds",
NUM_EDIT_LOG_ROLLS, delta));
}
/**
* Edit log op instances are cached internally using thread-local storage.
* This test checks that the cached instances are reset in between different
* transactions processed on the same thread, so that we don't accidentally
* apply incorrect attributes to an inode.
*
* @throws IOException if there is an I/O error
*/
@Test
public void testResetThreadLocalCachedOps() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
// Set single handler thread, so all transactions hit same thread-local ops.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HANDLER_COUNT_KEY, 1);
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
// Create /dir1 with a default ACL.
Path dir1 = new Path("/dir1");
fileSys.mkdirs(dir1);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
fileSys.modifyAclEntries(dir1, aclSpec);
// /dir1/dir2 is expected to clone the default ACL.
Path dir2 = new Path("/dir1/dir2");
fileSys.mkdirs(dir2);
// /dir1/file1 is expected to clone the default ACL.
Path file1 = new Path("/dir1/file1");
fileSys.create(file1).close();
// /dir3 is not a child of /dir1, so must not clone the default ACL.
Path dir3 = new Path("/dir3");
fileSys.mkdirs(dir3);
// /file2 is not a child of /dir1, so must not clone the default ACL.
Path file2 = new Path("/file2");
fileSys.create(file2).close();
// Restart and assert the above stated expectations.
IOUtils.cleanup(LOG, fileSys);
cluster.restartNameNode();
fileSys = cluster.getFileSystem();
assertFalse(fileSys.getAclStatus(dir1).getEntries().isEmpty());
assertFalse(fileSys.getAclStatus(dir2).getEntries().isEmpty());
assertFalse(fileSys.getAclStatus(file1).getEntries().isEmpty());
assertTrue(fileSys.getAclStatus(dir3).getEntries().isEmpty());
assertTrue(fileSys.getAclStatus(file2).getEntries().isEmpty());
} finally {
IOUtils.cleanup(LOG, fileSys);
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 55,527
| 34.255873
| 100
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecondaryNameNodeUpgrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import com.google.common.collect.ImmutableMap;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Map;
import org.junit.Test;
import org.junit.Before;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Assert;
import org.apache.hadoop.test.GenericTestUtils;
/**
* Regression test for HDFS-3597, SecondaryNameNode upgrade -- when a 2NN
* starts up with an existing directory structure with an old VERSION file, it
* should delete the snapshot and download a new one from the NN.
*/
public class TestSecondaryNameNodeUpgrade {
@Before
public void cleanupCluster() throws IOException {
File hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
System.out.println("cleanupCluster deleting " + hdfsDir);
if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
}
private void doIt(Map<String, String> paramsToCorrupt) throws IOException {
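    // Flow: checkpoint once so the 2NN lays down its storage, rewrite the
    // given VERSION fields in place, then start a fresh 2NN and checkpoint
    // again to see whether it recovers (or correctly refuses to).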
MiniDFSCluster cluster = null;
FileSystem fs = null;
SecondaryNameNode snn = null;
try {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
snn = new SecondaryNameNode(conf);
fs = cluster.getFileSystem();
fs.mkdirs(new Path("/test/foo"));
snn.doCheckpoint();
List<File> versionFiles = snn.getFSImage().getStorage().getFiles(null, "VERSION");
snn.shutdown();
for (File versionFile : versionFiles) {
for (Map.Entry<String, String> paramToCorrupt : paramsToCorrupt.entrySet()) {
String param = paramToCorrupt.getKey();
String val = paramToCorrupt.getValue();
System.out.println("Changing '" + param + "' to '" + val + "' in " + versionFile);
FSImageTestUtil.corruptVersionFile(versionFile, param, val);
}
}
snn = new SecondaryNameNode(conf);
fs.mkdirs(new Path("/test/bar"));
snn.doCheckpoint();
} finally {
if (fs != null) fs.close();
if (cluster != null) cluster.shutdown();
if (snn != null) snn.shutdown();
}
}
@Test
public void testUpgradeLayoutVersionSucceeds() throws IOException {
doIt(ImmutableMap.of("layoutVersion", "-39"));
}
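  // layoutVersion -19 predates federation, so clusterID and blockpoolID are
  // blanked as well to mimic a genuinely pre-federation VERSION file.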
@Test
public void testUpgradePreFedSucceeds() throws IOException {
doIt(ImmutableMap.of("layoutVersion", "-19", "clusterID", "",
"blockpoolID", ""));
}
@Test
public void testChangeNsIDFails() throws IOException {
try {
doIt(ImmutableMap.of("namespaceID", "2"));
Assert.fail("Should throw InconsistentFSStateException");
} catch(IOException e) {
GenericTestUtils.assertExceptionContains("Inconsistent checkpoint fields", e);
System.out.println("Correctly failed with inconsistent namespaceID: " + e);
}
}
}
| 4,096
| 32.040323
| 92
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.assertNNHasCheckpoints;
import static org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil.getNameNodeCurrentDirs;
import static org.apache.hadoop.test.MetricsAsserts.assertCounterGt;
import static org.apache.hadoop.test.MetricsAsserts.assertGaugeGt;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.FileOutputStream;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.lang.management.ManagementFactory;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Random;
import com.google.common.io.Files;
import org.apache.commons.cli.ParseException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode.CheckpointStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.GenericTestUtils.DelayAnswer;
import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.apache.hadoop.util.StringUtils;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import com.google.common.base.Joiner;
import com.google.common.base.Supplier;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.primitives.Ints;
/**
* This class tests the creation and validation of a checkpoint.
*/
public class TestCheckpoint {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
}
static final Log LOG = LogFactory.getLog(TestCheckpoint.class);
static final String NN_METRICS = "NameNodeActivity";
static final long seed = 0xDEADBEEFL;
static final int blockSize = 4096;
static final int fileSize = 8192;
static final int numDatanodes = 3;
short replication = 3;
static final FilenameFilter tmpEditsFilter = new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
return name.startsWith(NameNodeFile.EDITS_TMP.getName());
}
};
private CheckpointFaultInjector faultInjector;
@Before
public void setUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
faultInjector = Mockito.mock(CheckpointFaultInjector.class);
CheckpointFaultInjector.instance = faultInjector;
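    // All tests share this mock injector; individual tests program it with
    // Mockito.doThrow(...) to fail specific stages of the checkpoint.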
}
static void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);
byte[] buffer = new byte[TestCheckpoint.fileSize];
Random rand = new Random(TestCheckpoint.seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
@After
public void checkForSNNThreads() {
GenericTestUtils.assertNoThreadsMatching(".*SecondaryNameNode.*");
}
static void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException {
assertTrue(fileSys.exists(name));
int replication = fileSys.getFileStatus(name).getReplication();
assertEquals("replication for " + name, repl, replication);
    // We should probably test for more of the file properties.
}
static void cleanupFile(FileSystem fileSys, Path name)
throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
assertTrue(!fileSys.exists(name));
}
/*
* Verify that namenode does not startup if one namedir is bad.
*/
@Test
public void testNameDirError() throws IOException {
LOG.info("Starting testNameDirError");
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.build();
Collection<URI> nameDirs = cluster.getNameDirs(0);
cluster.shutdown();
cluster = null;
for (URI nameDirUri : nameDirs) {
File dir = new File(nameDirUri.getPath());
try {
// Simulate the mount going read-only
FileUtil.setWritable(dir, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false).build();
fail("NN should have failed to start with " + dir + " set unreadable");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"storage directory does not exist or is not accessible", ioe);
} finally {
cleanup(cluster);
cluster = null;
FileUtil.setWritable(dir, true);
}
}
}
/**
* Checks that an IOException in NNStorage.writeTransactionIdFile is handled
* correctly (by removing the storage directory)
* See https://issues.apache.org/jira/browse/HDFS-2011
*/
@Test
public void testWriteTransactionIdHandlesIOE() throws Exception {
LOG.info("Check IOException handled correctly by writeTransactionIdFile");
ArrayList<URI> fsImageDirs = new ArrayList<URI>();
ArrayList<URI> editsDirs = new ArrayList<URI>();
File filePath =
new File(PathUtils.getTestDir(getClass()), "storageDirToCheck");
assertTrue("Couldn't create directory storageDirToCheck",
filePath.exists() || filePath.mkdirs());
fsImageDirs.add(filePath.toURI());
editsDirs.add(filePath.toURI());
NNStorage nnStorage = new NNStorage(new HdfsConfiguration(),
fsImageDirs, editsDirs);
try {
assertTrue("List of storage directories didn't have storageDirToCheck.",
nnStorage.getEditsDirectories().iterator().next().
toString().indexOf("storageDirToCheck") != -1);
assertTrue("List of removed storage directories wasn't empty",
nnStorage.getRemovedStorageDirs().isEmpty());
} finally {
// Delete storage directory to cause IOException in writeTransactionIdFile
assertTrue("Couldn't remove directory " + filePath.getAbsolutePath(),
filePath.delete());
}
// Just call writeTransactionIdFile using any random number
nnStorage.writeTransactionIdFileToStorage(1);
List<StorageDirectory> listRsd = nnStorage.getRemovedStorageDirs();
assertTrue("Removed directory wasn't what was expected",
listRsd.size() > 0 && listRsd.get(listRsd.size() - 1).getRoot().
toString().indexOf("storageDirToCheck") != -1);
nnStorage.close();
}
/*
* Simulate exception during edit replay.
*/
@Test(timeout=30000)
  public void testReloadOnEditReplayFailure() throws IOException {
Configuration conf = new HdfsConfiguration();
FSDataOutputStream fos = null;
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
fos = fs.create(new Path("tmpfile0"));
fos.write(new byte[] { 0, 1, 2, 3 });
secondary.doCheckpoint();
fos.write(new byte[] { 0, 1, 2, 3 });
fos.hsync();
// Cause merge to fail in next checkpoint.
Mockito.doThrow(new IOException(
"Injecting failure during merge"))
.when(faultInjector).duringMerge();
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
} catch (IOException ioe) {
// This is expected.
}
Mockito.reset(faultInjector);
// The error must be recorded, so next checkpoint will reload image.
fos.write(new byte[] { 0, 1, 2, 3 });
fos.hsync();
assertTrue("Another checkpoint should have reloaded image",
secondary.doCheckpoint());
} finally {
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
Mockito.reset(faultInjector);
}
}
/*
* Simulate 2NN exit due to too many merge failures.
*/
@Test(timeout=30000)
public void testTooManyEditReplayFailures() throws IOException {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_MAX_RETRIES_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 1);
FSDataOutputStream fos = null;
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.checkExitOnShutdown(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
fos = fs.create(new Path("tmpfile0"));
fos.write(new byte[] { 0, 1, 2, 3 });
// Cause merge to fail in next checkpoint.
Mockito.doThrow(new IOException(
"Injecting failure during merge"))
.when(faultInjector).duringMerge();
secondary = startSecondaryNameNode(conf);
secondary.doWork();
// Fail if we get here.
fail("2NN did not exit.");
} catch (ExitException ee) {
// ignore
ExitUtil.resetFirstExitException();
assertEquals("Max retries", 1, secondary.getMergeErrorCount() - 1);
} finally {
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
Mockito.reset(faultInjector);
}
}
/*
* Simulate namenode crashing after rolling edit log.
*/
@Test
public void testSecondaryNamenodeError1()
throws IOException {
LOG.info("Starting testSecondaryNamenodeError1");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointxx.dat");
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
// Make the checkpoint fail after rolling the edits log.
secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after rolling edit logs"))
.when(faultInjector).afterSecondaryCallsRollEditLog();
try {
secondary.doCheckpoint(); // this should fail
        fail("Checkpoint should have failed with the injected fault");
} catch (IOException e) {
// expected
}
Mockito.reset(faultInjector);
//
// Create a new file
//
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
//
// Restart cluster and verify that file exists.
// Then take another checkpoint to verify that the
// namenode restart accounted for the rolled edit logs.
//
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
secondary.shutdown();
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/*
* Simulate a namenode crash after uploading new image
*/
@Test
public void testSecondaryNamenodeError2() throws IOException {
LOG.info("Starting testSecondaryNamenodeError2");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointyy.dat");
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
//
// Make the checkpoint fail after uploading the new fsimage.
//
secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after uploading new image"))
.when(faultInjector).afterSecondaryUploadsNewImage();
try {
secondary.doCheckpoint(); // this should fail
        fail("Checkpoint should have failed with the injected fault");
} catch (IOException e) {
// expected
}
Mockito.reset(faultInjector);
//
// Create a new file
//
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
//
// Restart cluster and verify that file exists.
// Then take another checkpoint to verify that the
// namenode restart accounted for the rolled edit logs.
//
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
secondary.shutdown();
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/*
* Simulate a secondary namenode crash after rolling the edit log.
*/
@Test
public void testSecondaryNamenodeError3() throws IOException {
LOG.info("Starting testSecondaryNamenodeError3");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointzz.dat");
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
//
// Make the checkpoint fail after rolling the edit log.
//
secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after rolling edit logs"))
.when(faultInjector).afterSecondaryCallsRollEditLog();
try {
secondary.doCheckpoint(); // this should fail
        fail("Checkpoint should have failed with the injected fault");
} catch (IOException e) {
// expected
}
Mockito.reset(faultInjector);
secondary.shutdown(); // secondary namenode crash!
// start new instance of secondary and verify that
      // a new rollEditLog succeeds in spite of the fact that
// edits.new already exists.
//
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint(); // this should work correctly
//
// Create a new file
//
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
//
// Restart cluster and verify that file exists.
// Then take another checkpoint to verify that the
// namenode restart accounted for the twice-rolled edit logs.
//
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
secondary.shutdown();
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
   * Simulate a secondary node failing to transfer the image. Uses an unchecked
   * error to fail the transfer before even setting the length header. This used to
* cause image truncation. Regression test for HDFS-3330.
*/
@Test
public void testSecondaryFailsWithErrorBeforeSettingHeaders()
throws IOException {
Mockito.doThrow(new Error("If this exception is not caught by the " +
"name-node, fs image will be truncated."))
.when(faultInjector).beforeGetImageSetsHeaders();
doSecondaryFailsToReturnImage();
}
private void doSecondaryFailsToReturnImage() throws IOException {
LOG.info("Starting testSecondaryFailsToReturnImage");
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpointRI.dat");
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
FSImage image = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
image = cluster.getNameNode().getFSImage();
assertTrue(!fileSys.exists(file1));
StorageDirectory sd = image.getStorage().getStorageDir(0);
File latestImageBeforeCheckpoint = FSImageTestUtil.findLatestImageFile(sd);
long fsimageLength = latestImageBeforeCheckpoint.length();
//
// Make the checkpoint
//
secondary = startSecondaryNameNode(conf);
try {
secondary.doCheckpoint(); // this should fail
fail("Checkpoint succeeded even though we injected an error!");
} catch (IOException e) {
// check that it's the injected exception
GenericTestUtils.assertExceptionContains(
"If this exception is not caught", e);
}
Mockito.reset(faultInjector);
// Verify that image file sizes did not change.
for (StorageDirectory sd2 :
image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
File thisNewestImage = FSImageTestUtil.findLatestImageFile(sd2);
long len = thisNewestImage.length();
assertEquals(fsimageLength, len);
}
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
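  /**
   * Build a Mockito argument matcher that accepts any File whose absolute
   * path contains the given substring.
   */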
private File filePathContaining(final String substring) {
return Mockito.argThat(
new ArgumentMatcher<File>() {
@Override
public boolean matches(Object argument) {
String path = ((File) argument).getAbsolutePath();
return path.contains(substring);
}
});
}
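  /**
   * Throw an IOException if any temporary checkpoint image file is left
   * behind in the first two storage directories.
   */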
private void checkTempImages(NNStorage storage) throws IOException {
List<File> dirs = new ArrayList<File>();
dirs.add(storage.getStorageDir(0).getCurrentDir());
dirs.add(storage.getStorageDir(1).getCurrentDir());
for (File dir : dirs) {
File[] list = dir.listFiles();
for (File f : list) {
// Throw an exception if a temp image file is found.
if(f.getName().contains(NNStorage.NameNodeFile.IMAGE_NEW.getName())) {
throw new IOException("Found " + f);
}
}
}
}
/**
   * Simulate 2NN failing to send the whole file (error type 3).
* The length header in the HTTP transfer should prevent
* this from corrupting the NN.
*/
@Test
public void testNameNodeImageSendFailWrongSize()
throws IOException {
LOG.info("Starting testNameNodeImageSendFailWrongSize");
Mockito.doReturn(true).when(faultInjector)
.shouldSendShortFile(filePathContaining("fsimage"));
doSendFailTest("is not of the advertised size");
}
/**
   * Simulate 2NN sending a corrupt image (error type 4).
* The digest header in the HTTP transfer should prevent
* this from corrupting the NN.
*/
@Test
public void testNameNodeImageSendFailWrongDigest()
throws IOException {
LOG.info("Starting testNameNodeImageSendFailWrongDigest");
Mockito.doReturn(true).when(faultInjector)
.shouldCorruptAByte(Mockito.any(File.class));
doSendFailTest("does not match advertised digest");
}
/**
* Run a test where the 2NN runs into some kind of error when
* sending the checkpoint back to the NN.
* @param exceptionSubstring an expected substring of the triggered exception
*/
private void doSendFailTest(String exceptionSubstring)
throws IOException {
Configuration conf = new HdfsConfiguration();
Path file1 = new Path("checkpoint-doSendFailTest-doSendFailTest.dat");
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
//
// Make the checkpoint fail after rolling the edit log.
//
secondary = startSecondaryNameNode(conf);
try {
secondary.doCheckpoint(); // this should fail
fail("Did not get expected exception");
} catch (IOException e) {
// We only sent part of the image. Have to trigger this exception
GenericTestUtils.assertExceptionContains(exceptionSubstring, e);
}
Mockito.reset(faultInjector);
      // Make sure there are no temporary files left around.
checkTempImages(cluster.getNameNode().getFSImage().getStorage());
checkTempImages(secondary.getFSImage().getStorage());
secondary.shutdown(); // secondary namenode crash!
secondary = null;
// start new instance of secondary and verify that
      // a new rollEditLog succeeds in spite of the fact that we had
// a partially failed checkpoint previously.
//
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint(); // this should work correctly
//
// Create a new file
//
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test that the NN locks its storage and edits directories, and won't start up
* if the directories are already locked
**/
@Test
public void testNameDirLocking() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
// Start a NN, and verify that lock() fails in all of the configured
// directories
StorageDirectory savedSd = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd = sd;
}
} finally {
cleanup(cluster);
cluster = null;
}
assertNotNull(savedSd);
// Lock one of the saved directories, then start the NN, and make sure it
// fails to start
assertClusterStartFailsWhenDirLocked(conf, savedSd);
}
/**
* Test that, if the edits dir is separate from the name dir, it is
* properly locked.
**/
@Test
public void testSeparateEditsDirLocking() throws IOException {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
File editsDir = new File(MiniDFSCluster.getBaseDirectory(),
"testSeparateEditsDirLocking");
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
editsDir.getAbsolutePath());
MiniDFSCluster cluster = null;
// Start a NN, and verify that lock() fails in all of the configured
// directories
StorageDirectory savedSd = null;
try {
cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
.numDataNodes(0).build();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
assertEquals(editsDir.getAbsoluteFile(), sd.getRoot());
assertLockFails(sd);
savedSd = sd;
}
} finally {
cleanup(cluster);
cluster = null;
}
assertNotNull(savedSd);
// Lock one of the saved directories, then start the NN, and make sure it
// fails to start
assertClusterStartFailsWhenDirLocked(conf, savedSd);
}
/**
* Test that the SecondaryNameNode properly locks its storage directories.
*/
@Test
public void testSecondaryNameNodeLocking() throws Exception {
// Start a primary NN so that the secondary will start successfully
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
StorageDirectory savedSd = null;
// Start a secondary NN, then make sure that all of its storage
// dirs got locked.
secondary = startSecondaryNameNode(conf);
NNStorage storage = secondary.getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd = sd;
}
LOG.info("===> Shutting down first 2NN");
secondary.shutdown();
secondary = null;
LOG.info("===> Locking a dir, starting second 2NN");
// Lock one of its dirs, make sure it fails to start
LOG.info("Trying to lock" + savedSd);
savedSd.lock();
try {
secondary = startSecondaryNameNode(conf);
assertFalse("Should fail to start 2NN when " + savedSd + " is locked",
savedSd.isLockSupported());
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("already locked", ioe);
} finally {
savedSd.unlock();
}
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
   * Test that an attempt to lock a storage directory that is already locked
   * logs an error message including the JVM name of the process holding the lock.
*/
@Test
public void testStorageAlreadyLockedErrorMessage() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
StorageDirectory savedSd = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
for (StorageDirectory sd : storage.dirIterable(null)) {
assertLockFails(sd);
savedSd = sd;
}
LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
LogFactory.getLog(Storage.class));
try {
// try to lock the storage that's already locked
savedSd.lock();
fail("Namenode should not be able to lock a storage" +
" that is already locked");
} catch (IOException ioe) {
// cannot read lock file on Windows, so message cannot get JVM name
String lockingJvmName = Path.WINDOWS ? "" :
" " + ManagementFactory.getRuntimeMXBean().getName();
String expectedLogMessage = "It appears that another node "
+ lockingJvmName + " has already locked the storage directory";
assertTrue("Log output does not contain expected log message: "
+ expectedLogMessage, logs.getOutput().contains(expectedLogMessage));
}
} finally {
cleanup(cluster);
cluster = null;
}
}
/**
* Assert that the given storage directory can't be locked, because
* it's already locked.
*/
private static void assertLockFails(StorageDirectory sd) {
try {
sd.lock();
// If the above line didn't throw an exception, then
// locking must not be supported
assertFalse(sd.isLockSupported());
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("already locked", ioe);
}
}
/**
* Assert that, if sdToLock is locked, the cluster is not allowed to start up.
* @param conf cluster conf to use
* @param sdToLock the storage directory to lock
*/
private static void assertClusterStartFailsWhenDirLocked(
Configuration conf, StorageDirectory sdToLock) throws IOException {
// Lock the edits dir, then start the NN, and make sure it fails to start
sdToLock.lock();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).format(false)
.manageNameDfsDirs(false).numDataNodes(0).build();
assertFalse("cluster should fail to start after locking " +
sdToLock, sdToLock.isLockSupported());
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("already locked", ioe);
} finally {
cleanup(cluster);
cluster = null;
sdToLock.unlock();
}
}
/**
* Test the importCheckpoint startup option. Verifies:
* 1. if the NN already contains an image, it will not be allowed
* to import a checkpoint.
* 2. if the NN does not contain an image, importing a checkpoint
* succeeds and re-saves the image
*/
@Test
public void testImportCheckpoint() throws Exception {
Configuration conf = new HdfsConfiguration();
Path testPath = new Path("/testfile");
SecondaryNameNode snn = null;
MiniDFSCluster cluster = null;
Collection<URI> nameDirs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
nameDirs = cluster.getNameDirs(0);
// Make an entry in the namespace, used for verifying checkpoint
// later.
cluster.getFileSystem().mkdirs(testPath);
// Take a checkpoint
snn = startSecondaryNameNode(conf);
snn.doCheckpoint();
} finally {
cleanup(snn);
cleanup(cluster);
cluster = null;
}
LOG.info("Trying to import checkpoint when the NameNode already " +
"contains an image. This should fail.");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
.startupOption(StartupOption.IMPORT).build();
fail("NameNode did not fail to start when it already contained " +
"an image");
} catch (IOException ioe) {
// Expected
GenericTestUtils.assertExceptionContains(
"NameNode already contains an image", ioe);
} finally {
cleanup(cluster);
cluster = null;
}
LOG.info("Removing NN storage contents");
for(URI uri : nameDirs) {
File dir = new File(uri.getPath());
LOG.info("Cleaning " + dir);
removeAndRecreateDir(dir);
}
LOG.info("Trying to import checkpoint");
try {
cluster = new MiniDFSCluster.Builder(conf).format(false).numDataNodes(0)
.startupOption(StartupOption.IMPORT).build();
assertTrue("Path from checkpoint should exist after import",
cluster.getFileSystem().exists(testPath));
// Make sure that the image got saved on import
FSImageTestUtil.assertNNHasCheckpoints(cluster, Ints.asList(3));
} finally {
cleanup(cluster);
cluster = null;
}
}
  private static void removeAndRecreateDir(File dir) throws IOException {
    if (dir.exists() && !FileUtil.fullyDelete(dir)) {
      throw new IOException("Cannot remove directory: " + dir);
    }
    if (!dir.mkdirs()) {
      throw new IOException("Cannot create directory " + dir);
    }
  }
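  /**
   * Start a 2NN on an ephemeral HTTP port (0.0.0.0:0) so concurrent test
   * runs cannot collide on a fixed port.
   */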
SecondaryNameNode startSecondaryNameNode(Configuration conf
) throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
return new SecondaryNameNode(conf);
}
SecondaryNameNode startSecondaryNameNode(Configuration conf, int index)
throws IOException {
Configuration snnConf = new Configuration(conf);
snnConf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
MiniDFSCluster.getBaseDirectory() + "/2nn-" + index);
return new SecondaryNameNode(snnConf);
}
/**
* Tests checkpoint in HDFS.
*/
@Test
public void testCheckpoint() throws IOException {
Path file1 = new Path("checkpoint.dat");
Path file2 = new Path("checkpoint2.dat");
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
numDatanodes).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
//
// verify that 'format' really blew away all pre-existing files
//
assertTrue(!fileSys.exists(file1));
assertTrue(!fileSys.exists(file2));
//
// Create file1
//
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
//
// Take a checkpoint
//
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
assertCounterGt("GetImageNumOps", 0, rb);
assertCounterGt("GetEditNumOps", 0, rb);
assertCounterGt("PutImageNumOps", 0, rb);
assertGaugeGt("GetImageAvgTime", 0.0, rb);
assertGaugeGt("GetEditAvgTime", 0.0, rb);
assertGaugeGt("PutImageAvgTime", 0.0, rb);
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
//
// Restart cluster and verify that file1 still exist.
//
Path tmpDir = new Path("/tmp_tmp");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
// check that file1 still exists
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
// create new file file2
writeFile(fileSys, file2, replication);
checkFile(fileSys, file2, replication);
//
// Take a checkpoint
//
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
fileSys.delete(tmpDir, true);
fileSys.mkdirs(tmpDir);
secondary.doCheckpoint();
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
//
// Restart cluster and verify that file2 exists and
// file1 does not exist.
//
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
assertTrue(!fileSys.exists(file1));
assertTrue(fileSys.exists(tmpDir));
try {
// verify that file2 exists
checkFile(fileSys, file2, replication);
} finally {
fileSys.close();
cluster.shutdown();
cluster = null;
}
}
/**
* Tests save namespace.
*/
@Test
public void testSaveNamespace() throws IOException {
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
FileContext fc;
try {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(true).build();
cluster.waitActive();
fs = (cluster.getFileSystem());
fc = FileContext.getFileContext(cluster.getURI(0));
// Saving image without safe mode should fail
DFSAdmin admin = new DFSAdmin(conf);
String[] args = new String[]{"-saveNamespace"};
try {
admin.run(args);
} catch(IOException eIO) {
assertTrue(eIO.getLocalizedMessage().contains("Safe mode should be turned ON"));
} catch(Exception e) {
throw new IOException(e);
}
// create new file
Path file = new Path("namespace.dat");
writeFile(fs, file, replication);
checkFile(fs, file, replication);
// create new link
Path symlink = new Path("file.link");
fc.createSymlink(file, symlink, false);
assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
// verify that the edits file is NOT empty
Collection<URI> editsDirs = cluster.getNameEditsDirs(0);
for(URI uri : editsDirs) {
File ed = new File(uri.getPath());
assertTrue(new File(ed, "current/"
+ NNStorage.getInProgressEditsFileName(1))
.length() > Integer.SIZE/Byte.SIZE);
}
// Saving image in safe mode should succeed
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
try {
admin.run(args);
} catch(Exception e) {
throw new IOException(e);
}
// TODO: Fix the test to not require a hard-coded transaction count.
final int EXPECTED_TXNS_FIRST_SEG = 13;
      // the following steps should have happened:
      //   edits_inprogress_1 -> edits_1-13 (finalized)
      //   fsimage_13 created
      //   edits_inprogress_14 created
      //
for(URI uri : editsDirs) {
File ed = new File(uri.getPath());
File curDir = new File(ed, "current");
LOG.info("Files in " + curDir + ":\n " +
Joiner.on("\n ").join(curDir.list()));
// Verify that the first edits file got finalized
File originalEdits = new File(curDir,
NNStorage.getInProgressEditsFileName(1));
assertFalse(originalEdits.exists());
File finalizedEdits = new File(curDir,
NNStorage.getFinalizedEditsFileName(1, EXPECTED_TXNS_FIRST_SEG));
GenericTestUtils.assertExists(finalizedEdits);
assertTrue(finalizedEdits.length() > Integer.SIZE/Byte.SIZE);
GenericTestUtils.assertExists(new File(ed, "current/"
+ NNStorage.getInProgressEditsFileName(
EXPECTED_TXNS_FIRST_SEG + 1)));
}
Collection<URI> imageDirs = cluster.getNameDirs(0);
for (URI uri : imageDirs) {
File imageDir = new File(uri.getPath());
File savedImage = new File(imageDir, "current/"
+ NNStorage.getImageFileName(
EXPECTED_TXNS_FIRST_SEG));
assertTrue("Should have saved image at " + savedImage,
savedImage.exists());
}
// restart cluster and verify file exists
cluster.shutdown();
cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes).format(false).build();
cluster.waitActive();
fs = (cluster.getFileSystem());
checkFile(fs, file, replication);
fc = FileContext.getFileContext(cluster.getURI(0));
assertTrue(fc.getFileLinkStatus(symlink).isSymlink());
} finally {
if(fs != null) fs.close();
cleanup(cluster);
cluster = null;
}
}
/* Test case to test CheckpointSignature */
@Test
public void testCheckpointSignature() throws IOException {
MiniDFSCluster cluster = null;
Configuration conf = new HdfsConfiguration();
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(true).build();
NameNode nn = cluster.getNameNode();
NamenodeProtocols nnRpc = nn.getRpcServer();
secondary = startSecondaryNameNode(conf);
// prepare checkpoint image
secondary.doCheckpoint();
CheckpointSignature sig = nnRpc.rollEditLog();
// manipulate the CheckpointSignature fields
sig.setBlockpoolID("somerandomebpid");
sig.clusterID = "somerandomcid";
try {
sig.validateStorageInfo(nn.getFSImage()); // this should fail
assertTrue("This test is expected to fail.", false);
} catch (Exception ignored) {
}
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Tests the following sequence of events:
* - secondary successfully makes a checkpoint
* - it then fails while trying to upload it
* - it then fails again for the same reason
* - it then tries to checkpoint a third time
*/
@Test
public void testCheckpointAfterTwoFailedUploads() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(true).build();
secondary = startSecondaryNameNode(conf);
Mockito.doThrow(new IOException(
"Injecting failure after rolling edit logs"))
.when(faultInjector).afterSecondaryCallsRollEditLog();
// Fail to checkpoint once
try {
secondary.doCheckpoint();
fail("Should have failed upload");
} catch (IOException ioe) {
LOG.info("Got expected failure", ioe);
assertTrue(ioe.toString().contains("Injecting failure"));
}
// Fail to checkpoint again
try {
secondary.doCheckpoint();
fail("Should have failed upload");
} catch (IOException ioe) {
LOG.info("Got expected failure", ioe);
assertTrue(ioe.toString().contains("Injecting failure"));
} finally {
Mockito.reset(faultInjector);
}
// Now with the cleared error simulation, it should succeed
secondary.doCheckpoint();
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Starts two namenodes and two secondary namenodes, verifies that secondary
* namenodes are configured correctly to talk to their respective namenodes
* and can do the checkpoint.
*
* @throws IOException
*/
@Test
public void testMultipleSecondaryNamenodes() throws IOException {
Configuration conf = new HdfsConfiguration();
String nameserviceId1 = "ns1";
String nameserviceId2 = "ns2";
conf.set(DFSConfigKeys.DFS_NAMESERVICES, nameserviceId1
+ "," + nameserviceId2);
MiniDFSCluster cluster = null;
SecondaryNameNode secondary1 = null;
SecondaryNameNode secondary2 = null;
try {
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleFederatedTopology(
conf.get(DFSConfigKeys.DFS_NAMESERVICES)))
.build();
Configuration snConf1 = new HdfsConfiguration(cluster.getConfiguration(0));
Configuration snConf2 = new HdfsConfiguration(cluster.getConfiguration(1));
InetSocketAddress nn1RpcAddress = cluster.getNameNode(0)
.getNameNodeAddress();
InetSocketAddress nn2RpcAddress = cluster.getNameNode(1)
.getNameNodeAddress();
String nn1 = nn1RpcAddress.getHostName() + ":" + nn1RpcAddress.getPort();
String nn2 = nn2RpcAddress.getHostName() + ":" + nn2RpcAddress.getPort();
// Set the Service Rpc address to empty to make sure the node specific
// setting works
snConf1.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
snConf2.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "");
// Set the nameserviceIds
snConf1.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId1),
nn1);
snConf2.set(DFSUtil.addKeySuffixes(
DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, nameserviceId2),
nn2);
secondary1 = startSecondaryNameNode(snConf1);
secondary2 = startSecondaryNameNode(snConf2);
// make sure the two secondary namenodes are talking to correct namenodes.
assertEquals(secondary1.getNameNodeAddress().getPort(),
nn1RpcAddress.getPort());
assertEquals(secondary2.getNameNodeAddress().getPort(),
nn2RpcAddress.getPort());
assertTrue(secondary1.getNameNodeAddress().getPort() != secondary2
.getNameNodeAddress().getPort());
// both should checkpoint.
secondary1.doCheckpoint();
secondary2.doCheckpoint();
} finally {
cleanup(secondary1);
secondary1 = null;
cleanup(secondary2);
secondary2 = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test that the secondary doesn't have to re-download image
* if it hasn't changed.
*/
@Test
public void testSecondaryImageDownload() throws IOException {
LOG.info("Starting testSecondaryImageDownload");
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
Path dir = new Path("/checkpoint");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(numDatanodes)
.format(true).build();
cluster.waitActive();
FileSystem fileSys = cluster.getFileSystem();
FSImage image = cluster.getNameNode().getFSImage();
SecondaryNameNode secondary = null;
try {
assertTrue(!fileSys.exists(dir));
//
// Make the checkpoint
//
secondary = startSecondaryNameNode(conf);
File secondaryDir = new File(MiniDFSCluster.getBaseDirectory(), "namesecondary1");
File secondaryCurrent = new File(secondaryDir, "current");
long expectedTxIdToDownload = cluster.getNameNode().getFSImage()
.getStorage().getMostRecentCheckpointTxId();
File secondaryFsImageBefore = new File(secondaryCurrent,
NNStorage.getImageFileName(expectedTxIdToDownload));
File secondaryFsImageAfter = new File(secondaryCurrent,
NNStorage.getImageFileName(expectedTxIdToDownload + 2));
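      // Each doCheckpoint() rolls the edit log, consuming two txids
      // (segment end + start); the mkdir below adds one more. That is why
      // the image txids advance by 2 and then by 5 from the starting point.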
assertFalse("Secondary should start with empty current/ dir " +
"but " + secondaryFsImageBefore + " exists",
secondaryFsImageBefore.exists());
assertTrue("Secondary should have loaded an image",
secondary.doCheckpoint());
assertTrue("Secondary should have downloaded original image",
secondaryFsImageBefore.exists());
assertTrue("Secondary should have created a new image",
secondaryFsImageAfter.exists());
long fsimageLength = secondaryFsImageBefore.length();
assertEquals("Image size should not have changed",
fsimageLength,
secondaryFsImageAfter.length());
// change namespace
fileSys.mkdirs(dir);
assertFalse("Another checkpoint should not have to re-load image",
secondary.doCheckpoint());
for (StorageDirectory sd :
image.getStorage().dirIterable(NameNodeDirType.IMAGE)) {
File imageFile = NNStorage.getImageFile(sd, NameNodeFile.IMAGE,
expectedTxIdToDownload + 5);
assertTrue("Image size increased",
imageFile.length() > fsimageLength);
}
} finally {
fileSys.close();
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test NN restart if a failure happens in between creating the fsimage
* MD5 file and renaming the fsimage.
*/
@Test(timeout=30000)
  public void testFailureBeforeRename() throws IOException {
Configuration conf = new HdfsConfiguration();
FSDataOutputStream fos = null;
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
fos = fs.create(new Path("tmpfile0"));
fos.write(new byte[] { 0, 1, 2, 3 });
secondary.doCheckpoint();
fos.write(new byte[] { 0, 1, 2, 3 });
fos.hsync();
// Cause merge to fail in next checkpoint.
Mockito.doThrow(new IOException(
"Injecting failure after MD5Rename"))
.when(faultInjector).afterMD5Rename();
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
} catch (IOException ioe) {
// This is expected.
}
Mockito.reset(faultInjector);
// Namenode should still restart successfully
cluster.restartNameNode();
} finally {
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
Mockito.reset(faultInjector);
}
}
/**
* Test that a fault while downloading edits does not prevent future
* checkpointing
*/
@Test(timeout = 30000)
public void testEditFailureBeforeRename() throws IOException {
Configuration conf = new HdfsConfiguration();
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0l);
secondary.doCheckpoint();
// Cause edit rename to fail during next checkpoint
Mockito.doThrow(new IOException("Injecting failure before edit rename"))
.when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short) 1, 0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Injecting failure before edit rename", ioe);
}
Mockito.reset(faultInjector);
// truncate the tmp edits file to simulate a partial download
for (StorageDirectory sd : secondary.getFSImage().getStorage()
.dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue(
"Expected a single tmp edits file in directory " + sd.toString(),
tmpEdits.length == 1);
RandomAccessFile randFile = new RandomAccessFile(tmpEdits[0], "rw");
randFile.setLength(0);
randFile.close();
}
// Next checkpoint should succeed
secondary.doCheckpoint();
} finally {
if (fs != null) {
fs.close();
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
Mockito.reset(faultInjector);
}
}
/**
* Test that a fault while downloading edits the first time after the 2NN
* starts up does not prevent future checkpointing.
*/
@Test(timeout = 30000)
public void testEditFailureOnFirstCheckpoint() throws IOException {
Configuration conf = new HdfsConfiguration();
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
fs.mkdirs(new Path("test-file-1"));
// Make sure the on-disk fsimage on the NN has txid > 0.
FSNamesystem fsns = cluster.getNamesystem();
fsns.enterSafeMode(false);
fsns.saveNamespace();
fsns.leaveSafeMode();
secondary = startSecondaryNameNode(conf);
// Cause edit rename to fail during next checkpoint
Mockito.doThrow(new IOException("Injecting failure before edit rename"))
.when(faultInjector).beforeEditsRename();
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Injecting failure before edit rename", ioe);
}
Mockito.reset(faultInjector);
// Next checkpoint should succeed
secondary.doCheckpoint();
} finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
Mockito.reset(faultInjector);
}
}
/**
* Test that the secondary namenode correctly deletes temporary edits
* on startup.
*/
@Test(timeout = 60000)
public void testDeleteTemporaryEditsOnStartup() throws IOException {
Configuration conf = new HdfsConfiguration();
SecondaryNameNode secondary = null;
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
DFSTestUtil.createFile(fs, new Path("tmpfile0"), 1024, (short) 1, 0l);
secondary.doCheckpoint();
// Cause edit rename to fail during next checkpoint
Mockito.doThrow(new IOException("Injecting failure before edit rename"))
.when(faultInjector).beforeEditsRename();
DFSTestUtil.createFile(fs, new Path("tmpfile1"), 1024, (short) 1, 0l);
try {
secondary.doCheckpoint();
fail("Fault injection failed.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"Injecting failure before edit rename", ioe);
}
Mockito.reset(faultInjector);
// Verify that a temp edits file is present
for (StorageDirectory sd : secondary.getFSImage().getStorage()
.dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue(
"Expected a single tmp edits file in directory " + sd.toString(),
tmpEdits.length == 1);
}
// Restart 2NN
secondary.shutdown();
secondary = startSecondaryNameNode(conf);
// Verify that tmp files were deleted
for (StorageDirectory sd : secondary.getFSImage().getStorage()
.dirIterable(NameNodeDirType.EDITS)) {
File[] tmpEdits = sd.getCurrentDir().listFiles(tmpEditsFilter);
assertTrue(
"Did not expect a tmp edits file in directory " + sd.toString(),
tmpEdits.length == 0);
}
// Next checkpoint should succeed
secondary.doCheckpoint();
} finally {
if (secondary != null) {
secondary.shutdown();
}
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
Mockito.reset(faultInjector);
}
}
/**
* Test case where two secondary namenodes are checkpointing the same
* NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}
* since that test runs against two distinct NNs.
*
* This case tests the following interleaving:
* - 2NN A downloads image (up to txid 2)
* - 2NN A about to save its own checkpoint
* - 2NN B downloads image (up to txid 4)
* - 2NN B uploads checkpoint (txid 4)
* - 2NN A uploads checkpoint (txid 2)
*
* It verifies that this works even though the earlier-txid checkpoint gets
* uploaded after the later-txid checkpoint.
*/
@Test
public void testMultipleSecondaryNNsAgainstSameNN() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
SecondaryNameNode secondary1 = null, secondary2 = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
.build();
// Start 2NNs
secondary1 = startSecondaryNameNode(conf, 1);
secondary2 = startSecondaryNameNode(conf, 2);
// Make the first 2NN's checkpoint process delayable - we can pause it
// right before it saves its checkpoint image.
CheckpointStorage spyImage1 = spyOnSecondaryImage(secondary1);
DelayAnswer delayer = new DelayAnswer(LOG);
Mockito.doAnswer(delayer).when(spyImage1)
.saveFSImageInAllDirs(Mockito.<FSNamesystem>any(), Mockito.anyLong());
// Set up a thread to do a checkpoint from the first 2NN
DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1);
checkpointThread.start();
// Wait for the first checkpointer to get to where it should save its image.
delayer.waitForCall();
// Now make the second checkpointer run an entire checkpoint
secondary2.doCheckpoint();
// Let the first one finish
delayer.proceed();
// It should have succeeded even though another checkpoint raced with it.
checkpointThread.join();
checkpointThread.propagateExceptions();
// primary should record "last checkpoint" as the higher txid (even though
// a checkpoint with a lower txid finished most recently)
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
assertEquals(4, storage.getMostRecentCheckpointTxId());
// Should have accepted both checkpoints
assertNNHasCheckpoints(cluster, ImmutableList.of(2,4));
// Now have second one checkpoint one more time just to make sure that
// the NN isn't left in a broken state
secondary2.doCheckpoint();
// NN should have received new checkpoint
assertEquals(6, storage.getMostRecentCheckpointTxId());
// Validate invariant that files named the same are the same.
assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
// NN should have removed the checkpoint at txid 2 at this point, but has
// one at txid 6
assertNNHasCheckpoints(cluster, ImmutableList.of(4,6));
} finally {
cleanup(secondary1);
secondary1 = null;
cleanup(secondary2);
secondary2 = null;
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
/**
* Test case where two secondary namenodes are checkpointing the same
* NameNode. This differs from {@link #testMultipleSecondaryNamenodes()}
* since that test runs against two distinct NNs.
*
* This case tests the following interleaving:
* - 2NN A) calls rollEdits()
* - 2NN B) calls rollEdits()
* - 2NN A) paused at getRemoteEditLogManifest()
* - 2NN B) calls getRemoteEditLogManifest() (returns up to txid 4)
* - 2NN B) uploads checkpoint fsimage_4
* - 2NN A) allowed to proceed, also returns up to txid 4
* - 2NN A) uploads checkpoint fsimage_4 as well, should fail gracefully
*
* It verifies that one of the two gets an error that it's uploading a
* duplicate checkpoint, and the other one succeeds.
*/
@Test
public void testMultipleSecondaryNNsAgainstSameNN2() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
SecondaryNameNode secondary1 = null, secondary2 = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
.build();
// Start 2NNs
secondary1 = startSecondaryNameNode(conf, 1);
secondary2 = startSecondaryNameNode(conf, 2);
// Make the first 2NN's checkpoint process delayable - we can pause it
// right before it calls getRemoteEditLogManifest.
// The method to set up a spy on an RPC protocol is a little bit involved
// since we can't spy directly on a proxy object. This sets up a mock
// which delegates all its calls to the original object, instead.
final NamenodeProtocol origNN = secondary1.getNameNode();
final Answer<Object> delegator = new GenericTestUtils.DelegateAnswer(origNN);
NamenodeProtocol spyNN = Mockito.mock(NamenodeProtocol.class, delegator);
DelayAnswer delayer = new DelayAnswer(LOG) {
@Override
protected Object passThrough(InvocationOnMock invocation) throws Throwable {
return delegator.answer(invocation);
}
};
secondary1.setNameNode(spyNN);
Mockito.doAnswer(delayer).when(spyNN)
.getEditLogManifest(Mockito.anyLong());
// Set up a thread to do a checkpoint from the first 2NN
DoCheckpointThread checkpointThread = new DoCheckpointThread(secondary1);
checkpointThread.start();
// Wait for the first checkpointer to be about to call getEditLogManifest
delayer.waitForCall();
// Now make the second checkpointer run an entire checkpoint
secondary2.doCheckpoint();
// NN should have now received fsimage_4
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
assertEquals(4, storage.getMostRecentCheckpointTxId());
// Let the first one finish
delayer.proceed();
// Letting the first node continue, it should try to upload the
// same image, and gracefully ignore it, while logging an
// error message.
checkpointThread.join();
checkpointThread.propagateExceptions();
// primary should still consider fsimage_4 the latest
assertEquals(4, storage.getMostRecentCheckpointTxId());
// Now have second one checkpoint one more time just to make sure that
// the NN isn't left in a broken state
secondary2.doCheckpoint();
assertEquals(6, storage.getMostRecentCheckpointTxId());
// Should have accepted both checkpoints
assertNNHasCheckpoints(cluster, ImmutableList.of(4,6));
// Let the first one also go again on its own to make sure it can
// continue at next checkpoint
secondary1.setNameNode(origNN);
secondary1.doCheckpoint();
// NN should have received new checkpoint
assertEquals(8, storage.getMostRecentCheckpointTxId());
// Validate invariant that files named the same are the same.
assertParallelFilesInvariant(cluster, ImmutableList.of(secondary1, secondary2));
// Validate that the NN received checkpoints at expected txids
// (i.e that both checkpoints went through)
assertNNHasCheckpoints(cluster, ImmutableList.of(6,8));
} finally {
cleanup(secondary1);
secondary1 = null;
cleanup(secondary2);
secondary2 = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test case where the name node is reformatted while the secondary namenode
   * is running. The secondary should shut itself down if it talks to a NN
* with the wrong namespace.
*/
@Test
public void testReformatNNBetweenCheckpoints() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
1);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(true).build();
int origPort = cluster.getNameNodePort();
int origHttpPort = cluster.getNameNode().getHttpAddress().getPort();
Configuration snnConf = new Configuration(conf);
File checkpointDir = new File(MiniDFSCluster.getBaseDirectory(),
"namesecondary");
snnConf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
checkpointDir.getAbsolutePath());
secondary = startSecondaryNameNode(snnConf);
// secondary checkpoints once
secondary.doCheckpoint();
// we reformat primary NN
cluster.shutdown();
cluster = null;
// Brief sleep to make sure that the 2NN's IPC connection to the NN
// is dropped.
try {
Thread.sleep(100);
} catch (InterruptedException ie) {
}
// Start a new NN with the same host/port.
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.nameNodePort(origPort)
.nameNodeHttpPort(origHttpPort)
.format(true).build();
try {
secondary.doCheckpoint();
fail("Should have failed checkpoint against a different namespace");
} catch (IOException ioe) {
LOG.info("Got expected failure", ioe);
assertTrue(ioe.toString().contains("Inconsistent checkpoint"));
}
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test that the primary NN will not serve any files to a 2NN who doesn't
* share its namespace ID, and also will not accept any files from one.
*/
@Test
public void testNamespaceVerifiedOnFileTransfer() throws IOException {
MiniDFSCluster cluster = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(true).build();
NamenodeProtocols nn = cluster.getNameNodeRpc();
URL fsName = DFSUtil.getInfoServer(
cluster.getNameNode().getServiceRpcAddress(), conf,
DFSUtil.getHttpClientScheme(conf)).toURL();
// Make a finalized log on the server side.
nn.rollEditLog();
RemoteEditLogManifest manifest = nn.getEditLogManifest(1);
RemoteEditLog log = manifest.getLogs().get(0);
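      // Build a mock destination storage whose StorageInfo (namespace and
      // cluster IDs) cannot match the running NN's, so each transfer attempt
      // below should be rejected by the storage-info verification.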
NNStorage dstImage = Mockito.mock(NNStorage.class);
Mockito.doReturn(Lists.newArrayList(new File("/wont-be-written")))
.when(dstImage).getFiles(
Mockito.<NameNodeDirType>anyObject(), Mockito.anyString());
File mockImageFile = File.createTempFile("image", "");
FileOutputStream imageFile = new FileOutputStream(mockImageFile);
imageFile.write("data".getBytes());
imageFile.close();
Mockito.doReturn(mockImageFile).when(dstImage)
.findImageFile(Mockito.any(NameNodeFile.class), Mockito.anyLong());
Mockito.doReturn(new StorageInfo(1, 1, "X", 1, NodeType.NAME_NODE).toColonSeparatedString())
.when(dstImage).toColonSeparatedString();
try {
TransferFsImage.downloadImageToStorage(fsName, 0, dstImage, false);
fail("Storage info was not verified");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
assertTrue(msg, msg.contains("but the secondary expected"));
}
try {
TransferFsImage.downloadEditsToStorage(fsName, log, dstImage);
fail("Storage info was not verified");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
assertTrue(msg, msg.contains("but the secondary expected"));
}
try {
TransferFsImage.uploadImageFromStorage(fsName, conf, dstImage,
NameNodeFile.IMAGE, 0);
fail("Storage info was not verified");
} catch (IOException ioe) {
String msg = StringUtils.stringifyException(ioe);
assertTrue(msg, msg.contains("but the secondary expected"));
}
} finally {
cleanup(cluster);
cluster = null;
}
}
/**
* Test that, if a storage directory is failed when a checkpoint occurs,
* the non-failed storage directory receives the checkpoint.
*/
@Test
public void testCheckpointWithFailedStorageDir() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
File currentDir = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(true).build();
secondary = startSecondaryNameNode(conf);
// Checkpoint once
secondary.doCheckpoint();
// Now primary NN experiences failure of a volume -- fake by
// setting its current dir to a-x permissions
NamenodeProtocols nn = cluster.getNameNodeRpc();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
StorageDirectory sd0 = storage.getStorageDir(0);
StorageDirectory sd1 = storage.getStorageDir(1);
currentDir = sd0.getCurrentDir();
FileUtil.setExecutable(currentDir, false);
// Upload checkpoint when NN has a bad storage dir. This should
// succeed and create the checkpoint in the good dir.
secondary.doCheckpoint();
GenericTestUtils.assertExists(
new File(sd1.getCurrentDir(), NNStorage.getImageFileName(2)));
// Restore the good dir
FileUtil.setExecutable(currentDir, true);
nn.restoreFailedStorage("true");
nn.rollEditLog();
// Checkpoint again -- this should upload to both dirs
secondary.doCheckpoint();
assertNNHasCheckpoints(cluster, ImmutableList.of(8));
assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
} finally {
if (currentDir != null) {
FileUtil.setExecutable(currentDir, true);
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test case where the NN is configured with a name-only and an edits-only
* dir, with storage-restore turned on. In this case, if the name-only dir
* disappears and comes back, a new checkpoint after it has been restored
* should function correctly.
* @throws Exception
*/
@Test
public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
File currentDir = null;
Configuration conf = new HdfsConfiguration();
File base_dir = new File(MiniDFSCluster.getBaseDirectory());
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
MiniDFSCluster.getBaseDirectory() + "/name-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
MiniDFSCluster.getBaseDirectory() + "/edits-only");
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,
fileAsURI(new File(base_dir, "namesecondary1")).toString());
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
.manageNameDfsDirs(false).build();
secondary = startSecondaryNameNode(conf);
// Checkpoint once
secondary.doCheckpoint();
// Now primary NN experiences failure of its only name dir -- fake by
// setting its current dir to a-x permissions
NamenodeProtocols nn = cluster.getNameNodeRpc();
NNStorage storage = cluster.getNameNode().getFSImage().getStorage();
StorageDirectory sd0 = storage.getStorageDir(0);
assertEquals(NameNodeDirType.IMAGE, sd0.getStorageDirType());
currentDir = sd0.getCurrentDir();
assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "000"));
// Try to upload checkpoint -- this should fail since there are no
// valid storage dirs
try {
secondary.doCheckpoint();
fail("Did not fail to checkpoint when there are no valid storage dirs");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
"No targets in destination storage", ioe);
}
// Restore the good dir
assertEquals(0, FileUtil.chmod(currentDir.getAbsolutePath(), "755"));
nn.restoreFailedStorage("true");
nn.rollEditLog();
// Checkpoint again -- this should upload to the restored name dir
secondary.doCheckpoint();
assertNNHasCheckpoints(cluster, ImmutableList.of(8));
assertParallelFilesInvariant(cluster, ImmutableList.of(secondary));
} finally {
if (currentDir != null) {
FileUtil.chmod(currentDir.getAbsolutePath(), "755");
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
   * Test that the 2NN triggers a checkpoint once the configured number of
   * uncheckpointed transactions has accumulated.
*/
@Test(timeout=30000)
public void testCheckpointTriggerOnTxnCount() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 10);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1);
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0)
.format(true).build();
FileSystem fs = cluster.getFileSystem();
secondary = startSecondaryNameNode(conf);
secondary.startCheckpointThread();
final NNStorage storage = secondary.getFSImage().getStorage();
// 2NN should checkpoint at startup
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
LOG.info("Waiting for checkpoint txn id to go to 2");
return storage.getMostRecentCheckpointTxId() == 2;
}
}, 200, 15000);
// If we make 10 transactions, it should checkpoint again
for (int i = 0; i < 10; i++) {
fs.mkdirs(new Path("/test" + i));
}
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
LOG.info("Waiting for checkpoint txn id to go > 2");
return storage.getMostRecentCheckpointTxId() > 2;
}
}, 200, 15000);
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Test case where the secondary does a checkpoint, then stops for a while.
* In the meantime, the NN saves its image several times, so that the
* logs that connect the 2NN's old checkpoint to the current txid
* get archived. Then, the 2NN tries to checkpoint again.
*/
@Test
public void testSecondaryHasVeryOutOfDateImage() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(true).build();
secondary = startSecondaryNameNode(conf);
// Checkpoint once
secondary.doCheckpoint();
// Now primary NN saves namespace 3 times
NamenodeProtocols nn = cluster.getNameNodeRpc();
nn.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
for (int i = 0; i < 3; i++) {
nn.saveNamespace();
}
nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
// Now the secondary tries to checkpoint again with its
// old image in memory.
secondary.doCheckpoint();
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Regression test for HDFS-3678 "Edit log files are never being purged from 2NN"
*/
@Test
public void testSecondaryPurgesEditLogs() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY, 0);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(true)
.build();
FileSystem fs = cluster.getFileSystem();
fs.mkdirs(new Path("/foo"));
secondary = startSecondaryNameNode(conf);
// Checkpoint a few times. Doing this will cause a log roll, and thus
// several edit log segments on the 2NN.
for (int i = 0; i < 5; i++) {
secondary.doCheckpoint();
}
// Make sure there are no more edit log files than there should be.
List<File> checkpointDirs = getCheckpointCurrentDirs(secondary);
for (File checkpointDir : checkpointDirs) {
List<EditLogFile> editsFiles = FileJournalManager.matchEditLogs(
checkpointDir);
assertEquals("Edit log files were not purged from 2NN", 1,
editsFiles.size());
}
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Regression test for HDFS-3835 - "Long-lived 2NN cannot perform a
* checkpoint if security is enabled and the NN restarts without outstanding
* delegation tokens"
*/
@Test
public void testSecondaryNameNodeWithDelegationTokens() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(true).build();
assertNotNull(cluster.getNamesystem().getDelegationToken(new Text("atm")));
secondary = startSecondaryNameNode(conf);
      // Checkpoint once, so the 2NN loads the DT into its in-memory state.
secondary.doCheckpoint();
// Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
// therefore needs to download a new fsimage the next time it performs a
// checkpoint.
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
// Ensure that the 2NN can still perform a checkpoint.
secondary.doCheckpoint();
} finally {
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
/**
* Regression test for HDFS-3849. This makes sure that when we re-load the
* FSImage in the 2NN, we clear the existing leases.
*/
@Test
public void testSecondaryNameNodeWithSavedLeases() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
FSDataOutputStream fos = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(true).build();
FileSystem fs = cluster.getFileSystem();
fos = fs.create(new Path("tmpfile"));
fos.write(new byte[] { 0, 1, 2, 3 });
fos.hflush();
assertEquals(1, cluster.getNamesystem().getLeaseManager().countLease());
secondary = startSecondaryNameNode(conf);
assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());
      // Checkpoint once, so the 2NN loads the lease into its in-memory state.
secondary.doCheckpoint();
assertEquals(1, secondary.getFSNamesystem().getLeaseManager().countLease());
fos.close();
fos = null;
// Perform a saveNamespace, so that the NN has a new fsimage, and the 2NN
// therefore needs to download a new fsimage the next time it performs a
// checkpoint.
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
// Ensure that the 2NN can still perform a checkpoint.
secondary.doCheckpoint();
// And the leases have been cleared...
assertEquals(0, secondary.getFSNamesystem().getLeaseManager().countLease());
} finally {
if (fos != null) {
fos.close();
}
cleanup(secondary);
secondary = null;
cleanup(cluster);
cluster = null;
}
}
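  /**
   * Test parsing of the 2NN command-line options: -checkpoint [force],
   * -geteditsize and -format, including rejection of conflicting or
   * malformed argument combinations.
   */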
@Test
public void testCommandLineParsing() throws ParseException {
SecondaryNameNode.CommandLineOpts opts =
new SecondaryNameNode.CommandLineOpts();
opts.parse();
assertNull(opts.getCommand());
opts.parse("-checkpoint");
assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,
opts.getCommand());
assertFalse(opts.shouldForceCheckpoint());
opts.parse("-checkpoint", "force");
assertEquals(SecondaryNameNode.CommandLineOpts.Command.CHECKPOINT,
opts.getCommand());
assertTrue(opts.shouldForceCheckpoint());
opts.parse("-geteditsize");
assertEquals(SecondaryNameNode.CommandLineOpts.Command.GETEDITSIZE,
opts.getCommand());
opts.parse("-format");
assertTrue(opts.shouldFormat());
try {
opts.parse("-geteditsize", "-checkpoint");
fail("Should have failed bad parsing for two actions");
} catch (ParseException e) {
LOG.warn("Encountered ", e);
}
try {
opts.parse("-checkpoint", "xx");
fail("Should have failed for bad checkpoint arg");
} catch (ParseException e) {
LOG.warn("Encountered ", e);
}
}
@Test
public void testLegacyOivImage() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
File tmpDir = Files.createTempDir();
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_LEGACY_OIV_IMAGE_DIR_KEY,
tmpDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_NUM_CHECKPOINTS_RETAINED_KEY,
"2");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(true).build();
secondary = startSecondaryNameNode(conf);
// Checkpoint once
secondary.doCheckpoint();
String files1[] = tmpDir.list();
assertEquals("Only one file is expected", 1, files1.length);
      // Perform more checkpoints and check whether retention management
      // is working.
secondary.doCheckpoint();
secondary.doCheckpoint();
String files2[] = tmpDir.list();
assertEquals("Two files are expected", 2, files2.length);
// Verify that the first file is deleted.
for (String fName : files2) {
assertFalse(fName.equals(files1[0]));
}
} finally {
cleanup(secondary);
cleanup(cluster);
      FileUtil.fullyDelete(tmpDir);
}
}
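  /**
   * Shut down the given 2NN, logging rather than propagating any failure so
   * that test teardown can continue.
   */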
private static void cleanup(SecondaryNameNode snn) {
if (snn != null) {
try {
snn.shutdown();
} catch (Exception e) {
LOG.warn("Could not shut down secondary namenode", e);
}
}
}
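  /**
   * Shut down the given cluster, logging rather than propagating any failure
   * so that test teardown can continue.
   */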
private static void cleanup(MiniDFSCluster cluster) {
if (cluster != null) {
try {
cluster.shutdown();
} catch (Exception e) {
LOG.warn("Could not shutdown MiniDFSCluster ", e);
}
}
}
/**
* Assert that if any two files have the same name across the 2NNs
* and NN, they should have the same content too.
*/
private void assertParallelFilesInvariant(MiniDFSCluster cluster,
ImmutableList<SecondaryNameNode> secondaries) throws Exception {
List<File> allCurrentDirs = Lists.newArrayList();
allCurrentDirs.addAll(getNameNodeCurrentDirs(cluster, 0));
for (SecondaryNameNode snn : secondaries) {
allCurrentDirs.addAll(getCheckpointCurrentDirs(snn));
}
FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs,
ImmutableSet.of("VERSION"));
}
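  /**
   * @return the "current" directory within each of the 2NN's configured
   * checkpoint directories.
   */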
private static List<File> getCheckpointCurrentDirs(SecondaryNameNode secondary) {
List<File> ret = Lists.newArrayList();
for (String u : secondary.getCheckpointDirectories()) {
File checkpointDir = new File(URI.create(u).getPath());
ret.add(new File(checkpointDir, "current"));
}
return ret;
}
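  /**
   * Wrap the 2NN's checkpoint storage in a Mockito spy so tests can
   * intercept or delay individual image operations.
   */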
private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
    CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());
secondary1.setFSImage(spy);
return spy;
}
/**
* A utility class to perform a checkpoint in a different thread.
*/
private static class DoCheckpointThread extends Thread {
private final SecondaryNameNode snn;
private volatile Throwable thrown = null;
DoCheckpointThread(SecondaryNameNode snn) {
this.snn = snn;
}
@Override
public void run() {
try {
snn.doCheckpoint();
} catch (Throwable t) {
thrown = t;
}
}
void propagateExceptions() {
if (thrown != null) {
throw new RuntimeException(thrown);
}
}
}
}
| 88,055 | 33.504702 | 98 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumMap;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.io.IOUtils;
import org.mockito.Matchers;
import org.mockito.Mockito;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import com.google.common.io.Files;
/**
* Utility functions for testing fsimage storage.
*/
public abstract class FSImageTestUtil {
public static final Log LOG = LogFactory.getLog(FSImageTestUtil.class);
/**
* The position in the fsimage header where the txid is
* written.
*/
private static final long IMAGE_TXID_POS = 24;
/**
* This function returns a md5 hash of a file.
*
* @param file input file
* @return The md5 string
*/
public static String getFileMD5(File file) throws IOException {
return MD5FileUtils.computeMd5ForFile(file).toString();
}
/**
* Calculate the md5sum of an image after zeroing out the transaction ID
* field in the header. This is useful for tests that want to verify
* that two checkpoints have identical namespaces.
*/
public static String getImageFileMD5IgnoringTxId(File imageFile)
throws IOException {
File tmpFile = File.createTempFile("hadoop_imagefile_tmp", "fsimage");
tmpFile.deleteOnExit();
try {
Files.copy(imageFile, tmpFile);
RandomAccessFile raf = new RandomAccessFile(tmpFile, "rw");
try {
raf.seek(IMAGE_TXID_POS);
raf.writeLong(0);
} finally {
IOUtils.closeStream(raf);
}
return getFileMD5(tmpFile);
} finally {
tmpFile.delete();
}
}
public static StorageDirectory mockStorageDirectory(
File currentDir, NameNodeDirType type) {
// Mock the StorageDirectory interface to just point to this file
StorageDirectory sd = Mockito.mock(StorageDirectory.class);
Mockito.doReturn(type)
.when(sd).getStorageDirType();
Mockito.doReturn(currentDir).when(sd).getCurrentDir();
Mockito.doReturn(currentDir).when(sd).getRoot();
Mockito.doReturn(mockFile(true)).when(sd).getVersionFile();
Mockito.doReturn(mockFile(false)).when(sd).getPreviousDir();
return sd;
}
/**
* Make a mock storage directory that returns some set of file contents.
* @param type type of storage dir
* @param previousExists should we mock that the previous/ dir exists?
* @param fileNames the names of files contained in current/
*/
static StorageDirectory mockStorageDirectory(
StorageDirType type,
boolean previousExists,
String... fileNames) {
StorageDirectory sd = mock(StorageDirectory.class);
doReturn(type).when(sd).getStorageDirType();
// Version file should always exist
doReturn(mockFile(true)).when(sd).getVersionFile();
doReturn(mockFile(true)).when(sd).getRoot();
// Previous dir optionally exists
doReturn(mockFile(previousExists))
.when(sd).getPreviousDir();
// Return a mock 'current' directory which has the given paths
File[] files = new File[fileNames.length];
for (int i = 0; i < fileNames.length; i++) {
files[i] = new File(fileNames[i]);
}
File mockDir = Mockito.spy(new File("/dir/current"));
doReturn(files).when(mockDir).listFiles();
doReturn(mockDir).when(sd).getCurrentDir();
return sd;
}
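  /**
   * @return a mock File whose exists() method returns the given value.
   */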
static File mockFile(boolean exists) {
File mockFile = mock(File.class);
doReturn(exists).when(mockFile).exists();
return mockFile;
}
public static FSImageTransactionalStorageInspector inspectStorageDirectory(
File dir, NameNodeDirType dirType) throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
inspector.inspectDirectory(mockStorageDirectory(dir, dirType));
return inspector;
}
/**
* Return a standalone instance of FSEditLog that will log into the given
* log directory. The returned instance is not yet opened.
*/
public static FSEditLog createStandaloneEditLog(File logDir)
throws IOException {
assertTrue(logDir.mkdirs() || logDir.exists());
if (!FileUtil.fullyDeleteContents(logDir)) {
throw new IOException("Unable to delete contents of " + logDir);
}
NNStorage storage = Mockito.mock(NNStorage.class);
StorageDirectory sd
= FSImageTestUtil.mockStorageDirectory(logDir, NameNodeDirType.EDITS);
List<StorageDirectory> sds = Lists.newArrayList(sd);
Mockito.doReturn(sds).when(storage).dirIterable(NameNodeDirType.EDITS);
Mockito.doReturn(sd).when(storage)
.getStorageDirectory(Matchers.<URI>anyObject());
FSEditLog editLog = new FSEditLog(new Configuration(),
storage,
ImmutableList.of(logDir.toURI()));
editLog.initJournalsForWrite();
return editLog;
}
/**
* Create an aborted in-progress log in the given directory, containing
* only a specified number of "mkdirs" operations.
*/
public static void createAbortedLogWithMkdirs(File editsLogDir, int numDirs,
long firstTxId, long newInodeId) throws IOException {
FSEditLog editLog = FSImageTestUtil.createStandaloneEditLog(editsLogDir);
editLog.setNextTxId(firstTxId);
editLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
PermissionStatus perms = PermissionStatus.createImmutable("fakeuser", "fakegroup",
FsPermission.createImmutable((short)0755));
for (int i = 1; i <= numDirs; i++) {
String dirName = "dir" + i;
INodeDirectory dir = new INodeDirectory(newInodeId + i - 1,
DFSUtil.string2Bytes(dirName), perms, 0L);
editLog.logMkDir("/" + dirName, dir);
}
editLog.logSync();
editLog.abortCurrentLogSegment();
}
/**
* @param editLog a path of an edit log file
* @return the count of each type of operation in the log file
* @throws Exception if there is an error reading it
*/
public static EnumMap<FSEditLogOpCodes,Holder<Integer>> countEditLogOpTypes(
File editLog) throws Exception {
EditLogInputStream elis = new EditLogFileInputStream(editLog);
try {
return countEditLogOpTypes(elis);
} finally {
IOUtils.closeStream(elis);
}
}
/**
* @see #countEditLogOpTypes(File)
*/
public static EnumMap<FSEditLogOpCodes, Holder<Integer>> countEditLogOpTypes(
EditLogInputStream elis) throws IOException {
EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
FSEditLogOp op;
while ((op = elis.readOp()) != null) {
Holder<Integer> i = opCounts.get(op.opCode);
if (i == null) {
i = new Holder<Integer>(0);
opCounts.put(op.opCode, i);
}
i.held++;
}
return opCounts;
}
/**
* Assert that all of the given directories have the same newest filename
* for fsimage that they hold the same data.
*/
public static void assertSameNewestImage(List<File> dirs) throws Exception {
if (dirs.size() < 2) return;
long imageTxId = -1;
List<File> imageFiles = new ArrayList<File>();
for (File dir : dirs) {
FSImageTransactionalStorageInspector inspector =
inspectStorageDirectory(dir, NameNodeDirType.IMAGE);
List<FSImageFile> latestImages = inspector.getLatestImages();
assert(!latestImages.isEmpty());
long thisTxId = latestImages.get(0).getCheckpointTxId();
if (imageTxId != -1 && thisTxId != imageTxId) {
fail("Storage directory " + dir + " does not have the same " +
"last image index " + imageTxId + " as another");
}
imageTxId = thisTxId;
imageFiles.add(inspector.getLatestImages().get(0).getFile());
}
assertFileContentsSame(imageFiles.toArray(new File[0]));
}
/**
* Given a list of directories, assert that any files that are named
* the same thing have the same contents. For example, if a file
* named "fsimage_1" shows up in more than one directory, then it must
* be the same.
* @throws Exception
*/
public static void assertParallelFilesAreIdentical(List<File> dirs,
Set<String> ignoredFileNames) throws Exception {
HashMap<String, List<File>> groupedByName = new HashMap<String, List<File>>();
for (File dir : dirs) {
for (File f : dir.listFiles()) {
if (ignoredFileNames.contains(f.getName())) {
continue;
}
List<File> fileList = groupedByName.get(f.getName());
if (fileList == null) {
fileList = new ArrayList<File>();
groupedByName.put(f.getName(), fileList);
}
fileList.add(f);
}
}
for (List<File> sameNameList : groupedByName.values()) {
if (sameNameList.get(0).isDirectory()) {
// recurse
assertParallelFilesAreIdentical(sameNameList, ignoredFileNames);
} else {
if ("VERSION".equals(sameNameList.get(0).getName())) {
assertPropertiesFilesSame(sameNameList.toArray(new File[0]));
} else {
assertFileContentsSame(sameNameList.toArray(new File[0]));
}
}
}
}
/**
* Assert that a set of properties files all contain the same data.
* We cannot simply check the md5sums here, since Properties files
* contain timestamps -- thus, two properties files from the same
* saveNamespace operation may actually differ in md5sum.
* @param propFiles the files to compare
* @throws IOException if the files cannot be opened or read
* @throws AssertionError if the files differ
*/
public static void assertPropertiesFilesSame(File[] propFiles)
throws IOException {
Set<Map.Entry<Object, Object>> prevProps = null;
for (File f : propFiles) {
Properties props;
FileInputStream is = new FileInputStream(f);
try {
props = new Properties();
props.load(is);
} finally {
IOUtils.closeStream(is);
}
if (prevProps == null) {
prevProps = props.entrySet();
} else {
Set<Entry<Object,Object>> diff =
Sets.symmetricDifference(prevProps, props.entrySet());
if (!diff.isEmpty()) {
fail("Properties file " + f + " differs from " + propFiles[0]);
}
}
}
}
/**
* Assert that all of the given paths have the exact same
* contents
*/
public static void assertFileContentsSame(File... files) throws Exception {
if (files.length < 2) return;
Map<File, String> md5s = getFileMD5s(files);
if (Sets.newHashSet(md5s.values()).size() > 1) {
fail("File contents differed:\n " +
Joiner.on("\n ")
.withKeyValueSeparator("=")
.join(md5s));
}
}
/**
* Assert that the given files are not all the same, and in fact that
* they have <code>expectedUniqueHashes</code> unique contents.
*/
public static void assertFileContentsDifferent(
int expectedUniqueHashes,
File... files) throws Exception
{
Map<File, String> md5s = getFileMD5s(files);
if (Sets.newHashSet(md5s.values()).size() != expectedUniqueHashes) {
fail("Expected " + expectedUniqueHashes + " different hashes, got:\n " +
Joiner.on("\n ")
.withKeyValueSeparator("=")
.join(md5s));
}
}
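  /**
   * @return a map from each of the given files to its md5 hash, asserting
   * that every file exists.
   */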
public static Map<File, String> getFileMD5s(File... files) throws Exception {
Map<File, String> ret = Maps.newHashMap();
for (File f : files) {
assertTrue("Must exist: " + f, f.exists());
ret.put(f, getFileMD5(f));
}
return ret;
}
/**
* @return a List which contains the "current" dir for each storage
* directory of the given type.
*/
public static List<File> getCurrentDirs(NNStorage storage,
NameNodeDirType type) {
List<File> ret = Lists.newArrayList();
for (StorageDirectory sd : storage.dirIterable(type)) {
ret.add(sd.getCurrentDir());
}
return ret;
}
/**
* @return the fsimage file with the most recent transaction ID in the
* given storage directory.
*/
public static File findLatestImageFile(StorageDirectory sd)
throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
inspector.inspectDirectory(sd);
return inspector.getLatestImages().get(0).getFile();
}
/**
* @return the fsimage file with the most recent transaction ID in the
* given 'current/' directory.
*/
public static File findNewestImageFile(String currentDirPath) throws IOException {
StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
new File(currentDirPath), NameNodeDirType.IMAGE);
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
inspector.inspectDirectory(sd);
List<FSImageFile> latestImages = inspector.getLatestImages();
return (latestImages.isEmpty()) ? null : latestImages.get(0).getFile();
}
/**
* Assert that the NameNode has checkpoints at the expected
* transaction IDs.
*/
public static void assertNNHasCheckpoints(MiniDFSCluster cluster,
List<Integer> txids) {
assertNNHasCheckpoints(cluster, 0, txids);
}
public static void assertNNHasCheckpoints(MiniDFSCluster cluster,
int nnIdx, List<Integer> txids) {
for (File nameDir : getNameNodeCurrentDirs(cluster, nnIdx)) {
LOG.info("examining name dir with files: " +
Joiner.on(",").join(nameDir.listFiles()));
// Should have fsimage_N for the three checkpoints
LOG.info("Examining storage dir " + nameDir + " with contents: "
+ StringUtils.join(nameDir.listFiles(), ", "));
for (long checkpointTxId : txids) {
File image = new File(nameDir,
NNStorage.getImageFileName(checkpointTxId));
assertTrue("Expected non-empty " + image, image.length() > 0);
}
}
}
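  /**
   * @return the "current" directory within each name dir configured for the
   * NN at the given index in the cluster.
   */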
public static List<File> getNameNodeCurrentDirs(MiniDFSCluster cluster, int nnIdx) {
List<File> nameDirs = Lists.newArrayList();
for (URI u : cluster.getNameDirs(nnIdx)) {
nameDirs.add(new File(u.getPath(), "current"));
}
return nameDirs;
}
/**
* @return the latest edits log, finalized or otherwise, from the given
* storage directory.
*/
public static EditLogFile findLatestEditsLog(StorageDirectory sd)
throws IOException {
File currentDir = sd.getCurrentDir();
List<EditLogFile> foundEditLogs
= Lists.newArrayList(FileJournalManager.matchEditLogs(currentDir));
return Collections.max(foundEditLogs, EditLogFile.COMPARE_BY_START_TXID);
}
/**
* Corrupt the given VERSION file by replacing a given
* key with a new value and re-writing the file.
*
* @param versionFile the VERSION file to corrupt
* @param key the key to replace
* @param value the new value for this key
*/
public static void corruptVersionFile(File versionFile, String key, String value)
throws IOException {
Properties props = new Properties();
FileInputStream fis = new FileInputStream(versionFile);
FileOutputStream out = null;
try {
props.load(fis);
IOUtils.closeStream(fis);
if (value == null || value.isEmpty()) {
props.remove(key);
} else {
props.setProperty(key, value);
}
out = new FileOutputStream(versionFile);
props.store(out, null);
} finally {
IOUtils.cleanup(null, fis, out);
}
}
public static void assertReasonableNameCurrentDir(File curDir)
throws IOException {
assertTrue(curDir.isDirectory());
assertTrue(new File(curDir, "VERSION").isFile());
assertTrue(new File(curDir, "seen_txid").isFile());
File image = findNewestImageFile(curDir.toString());
assertNotNull(image);
}
public static void logStorageContents(Log LOG, NNStorage storage) {
LOG.info("current storages and corresponding sizes:");
for (StorageDirectory sd : storage.dirIterable(null)) {
File curDir = sd.getCurrentDir();
LOG.info("In directory " + curDir);
File[] files = curDir.listFiles();
Arrays.sort(files);
for (File f : files) {
LOG.info(" file " + f.getAbsolutePath() + "; len = " + f.length());
}
}
}
  /** Get the FSImage of the given NameNode. */
public static FSImage getFSImage(NameNode node) {
return node.getFSImage();
}
/**
* get NameSpace quota.
*/
public static long getNSQuota(FSNamesystem ns) {
return ns.dir.rootDir.getQuotaCounts().getNameSpace();
}
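  /**
   * Assert that the storage directories of the two NNs in the cluster hold
   * identical files, ignoring files that are expected to differ.
   */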
public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception {
List<File> curDirs = Lists.newArrayList();
curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0));
curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1));
// Ignore seen_txid file, since the newly bootstrapped standby
// will have a higher seen_txid than the one it bootstrapped from.
Set<String> ignoredFiles = ImmutableSet.of("seen_txid");
FSImageTestUtil.assertParallelFilesAreIdentical(curDirs,
ignoredFiles);
}
}
| 19,639 | 33.335664 | 86 | java |
| hadoop | hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileTruncate.java |
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.CoreMatchers.not;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.concurrent.ThreadLocalRandom;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
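/**
 * Tests for the truncate operation, covering basic truncation, interaction
 * with snapshots and appends, quotas, block recovery, and NN restarts.
 */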
public class TestFileTruncate {
static {
GenericTestUtils.setLogLevel(NameNode.stateChangeLog, Level.ALL);
GenericTestUtils.setLogLevel(FSEditLogLoader.LOG, Level.ALL);
}
static final Log LOG = LogFactory.getLog(TestFileTruncate.class);
static final int BLOCK_SIZE = 4;
static final short REPLICATION = 3;
static final int DATANODE_NUM = 3;
static final int SUCCESS_ATTEMPTS = 300;
static final int RECOVERY_ATTEMPTS = 600;
static final long SLEEP = 100L;
static final long LOW_SOFTLIMIT = 100L;
static final long LOW_HARDLIMIT = 200L;
static final int SHORT_HEARTBEAT = 1;
static Configuration conf;
static MiniDFSCluster cluster;
static DistributedFileSystem fs;
private Path parent;
@Before
public void setUp() throws IOException {
conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCK_SIZE);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, SHORT_HEARTBEAT);
conf.setLong(
DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf)
.format(true)
.numDataNodes(DATANODE_NUM)
.nameNodePort(NameNode.DEFAULT_PORT)
.waitSafeMode(true)
.build();
fs = cluster.getFileSystem();
parent = new Path("/test");
}
@After
public void tearDown() throws IOException {
    if (fs != null) fs.close();
    if (cluster != null) cluster.shutdown();
}
/**
* Truncate files of different sizes byte by byte.
*/
@Test
public void testBasicTruncate() throws IOException {
int startingFileSize = 3 * BLOCK_SIZE;
fs.mkdirs(parent);
fs.setQuota(parent, 100, 1000);
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
for (int fileLength = startingFileSize; fileLength > 0;
fileLength -= BLOCK_SIZE - 1) {
for (int toTruncate = 0; toTruncate <= fileLength; toTruncate++) {
final Path p = new Path(parent, "testBasicTruncate" + fileLength);
writeContents(contents, fileLength, p);
int newLength = fileLength - toTruncate;
boolean isReady = fs.truncate(p, newLength);
LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
+ ", toTruncate=" + toTruncate + ", isReady=" + isReady);
assertEquals("File must be closed for zero truncate"
+ " or truncating at the block boundary",
isReady, toTruncate == 0 || newLength % BLOCK_SIZE == 0);
if (!isReady) {
checkBlockRecovery(p);
}
ContentSummary cs = fs.getContentSummary(parent);
assertEquals("Bad disk space usage",
cs.getSpaceConsumed(), newLength * REPLICATION);
// validate the file content
checkFullFile(p, newLength, contents);
}
}
fs.delete(parent, true);
}
/** Truncate the same file multiple times until its size is zero. */
@Test
public void testMultipleTruncate() throws IOException {
Path dir = new Path("/testMultipleTruncate");
fs.mkdirs(dir);
final Path p = new Path(dir, "file");
final byte[] data = new byte[100 * BLOCK_SIZE];
ThreadLocalRandom.current().nextBytes(data);
writeContents(data, data.length, p);
for(int n = data.length; n > 0; ) {
final int newLength = ThreadLocalRandom.current().nextInt(n);
final boolean isReady = fs.truncate(p, newLength);
LOG.info("newLength=" + newLength + ", isReady=" + isReady);
assertEquals("File must be closed for truncating at the block boundary",
isReady, newLength % BLOCK_SIZE == 0);
assertEquals("Truncate is not idempotent",
isReady, fs.truncate(p, newLength));
if (!isReady) {
checkBlockRecovery(p);
}
checkFullFile(p, newLength, data);
n = newLength;
}
fs.delete(dir, true);
}
  /** Truncate a snapshotted file, delete the snapshot, and verify that the
   * file contents and block cleanup are correct. */
@Test
public void testSnapshotTruncateThenDeleteSnapshot() throws IOException {
Path dir = new Path("/testSnapshotTruncateThenDeleteSnapshot");
fs.mkdirs(dir);
fs.allowSnapshot(dir);
final Path p = new Path(dir, "file");
final byte[] data = new byte[BLOCK_SIZE];
ThreadLocalRandom.current().nextBytes(data);
writeContents(data, data.length, p);
final String snapshot = "s0";
fs.createSnapshot(dir, snapshot);
Block lastBlock = getLocatedBlocks(p).getLastLocatedBlock()
.getBlock().getLocalBlock();
final int newLength = data.length - 1;
assert newLength % BLOCK_SIZE != 0 :
" newLength must not be multiple of BLOCK_SIZE";
final boolean isReady = fs.truncate(p, newLength);
LOG.info("newLength=" + newLength + ", isReady=" + isReady);
assertEquals("File must be closed for truncating at the block boundary",
isReady, newLength % BLOCK_SIZE == 0);
fs.deleteSnapshot(dir, snapshot);
if (!isReady) {
checkBlockRecovery(p);
}
checkFullFile(p, newLength, data);
assertBlockNotPresent(lastBlock);
fs.delete(dir, true);
}
/**
* Truncate files and then run other operations such as
* rename, set replication, set permission, etc.
*/
@Test
public void testTruncateWithOtherOperations() throws IOException {
Path dir = new Path("/testTruncateOtherOperations");
fs.mkdirs(dir);
final Path p = new Path(dir, "file");
final byte[] data = new byte[2 * BLOCK_SIZE];
ThreadLocalRandom.current().nextBytes(data);
writeContents(data, data.length, p);
final int newLength = data.length - 1;
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
fs.setReplication(p, (short)(REPLICATION - 1));
fs.setPermission(p, FsPermission.createImmutable((short)0444));
final Path q = new Path(dir, "newFile");
fs.rename(p, q);
checkBlockRecovery(q);
checkFullFile(q, newLength, data);
cluster.restartNameNode();
checkFullFile(q, newLength, data);
fs.delete(dir, true);
}
@Test
public void testSnapshotWithAppendTruncate() throws IOException {
testSnapshotWithAppendTruncate(0, 1, 2);
testSnapshotWithAppendTruncate(0, 2, 1);
testSnapshotWithAppendTruncate(1, 0, 2);
testSnapshotWithAppendTruncate(1, 2, 0);
testSnapshotWithAppendTruncate(2, 0, 1);
testSnapshotWithAppendTruncate(2, 1, 0);
}
/**
* Create three snapshots with appended and truncated file.
* Delete snapshots in the specified order and verify that
* remaining snapshots are still readable.
*/
void testSnapshotWithAppendTruncate(int ... deleteOrder) throws IOException {
FSDirectory fsDir = cluster.getNamesystem().getFSDirectory();
fs.mkdirs(parent);
fs.setQuota(parent, 100, 1000);
fs.allowSnapshot(parent);
String truncateFile = "testSnapshotWithAppendTruncate";
final Path src = new Path(parent, truncateFile);
int[] length = new int[4];
length[0] = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
Path[] snapshotFiles = new Path[4];
// Diskspace consumed should be 10 bytes * 3. [blk 1,2,3]
ContentSummary contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(30L));
// Add file to snapshot and append
String[] ss = new String[] {"ss0", "ss1", "ss2", "ss3"};
Path snapshotDir = fs.createSnapshot(parent, ss[0]);
snapshotFiles[0] = new Path(snapshotDir, truncateFile);
length[1] = length[2] = length[0] + BLOCK_SIZE + 1;
DFSTestUtil.appendFile(fs, src, BLOCK_SIZE + 1);
Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock()
.getBlock().getLocalBlock();
// Diskspace consumed should be 15 bytes * 3. [blk 1,2,3,4]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(45L));
// Create another snapshot without changes
snapshotDir = fs.createSnapshot(parent, ss[1]);
snapshotFiles[1] = new Path(snapshotDir, truncateFile);
// Create another snapshot and append
snapshotDir = fs.createSnapshot(parent, ss[2]);
snapshotFiles[2] = new Path(snapshotDir, truncateFile);
    DFSTestUtil.appendFile(fs, src, BLOCK_SIZE - 1 + BLOCK_SIZE / 2);
Block appendedBlk = getLocatedBlocks(src).getLastLocatedBlock()
.getBlock().getLocalBlock();
// Diskspace consumed should be 20 bytes * 3. [blk 1,2,3,4,5]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(60L));
// Truncate to block boundary
int newLength = length[0] + BLOCK_SIZE / 2;
boolean isReady = fs.truncate(src, newLength);
assertTrue("Recovery is not expected.", isReady);
assertFileLength(snapshotFiles[2], length[2]);
assertFileLength(snapshotFiles[1], length[1]);
assertFileLength(snapshotFiles[0], length[0]);
assertBlockNotPresent(appendedBlk);
// Diskspace consumed should be 16 bytes * 3. [blk 1,2,3 SS:4]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(48L));
// Truncate full block again
newLength = length[0] - BLOCK_SIZE / 2;
isReady = fs.truncate(src, newLength);
assertTrue("Recovery is not expected.", isReady);
assertFileLength(snapshotFiles[2], length[2]);
assertFileLength(snapshotFiles[1], length[1]);
assertFileLength(snapshotFiles[0], length[0]);
// Diskspace consumed should be 16 bytes * 3. [blk 1,2 SS:3,4]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(48L));
// Truncate half of the last block
newLength -= BLOCK_SIZE / 2;
isReady = fs.truncate(src, newLength);
assertFalse("Recovery is expected.", isReady);
checkBlockRecovery(src);
assertFileLength(snapshotFiles[2], length[2]);
assertFileLength(snapshotFiles[1], length[1]);
assertFileLength(snapshotFiles[0], length[0]);
Block replacedBlk = getLocatedBlocks(src).getLastLocatedBlock()
.getBlock().getLocalBlock();
// Diskspace consumed should be 18 bytes * 3. [blk 1,6 SS:2,3,4]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(54L));
snapshotDir = fs.createSnapshot(parent, ss[3]);
snapshotFiles[3] = new Path(snapshotDir, truncateFile);
length[3] = newLength;
// Delete file. Should still be able to read snapshots
int numINodes = fsDir.getInodeMapSize();
isReady = fs.delete(src, false);
assertTrue("Delete failed.", isReady);
assertFileLength(snapshotFiles[3], length[3]);
assertFileLength(snapshotFiles[2], length[2]);
assertFileLength(snapshotFiles[1], length[1]);
assertFileLength(snapshotFiles[0], length[0]);
assertEquals("Number of INodes should not change",
numINodes, fsDir.getInodeMapSize());
fs.deleteSnapshot(parent, ss[3]);
assertBlockExists(firstBlk);
assertBlockExists(lastBlk);
assertBlockNotPresent(replacedBlk);
// Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(48L));
// delete snapshots in the specified order
fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
assertBlockExists(firstBlk);
assertBlockExists(lastBlk);
assertEquals("Number of INodes should not change",
numINodes, fsDir.getInodeMapSize());
// Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(48L));
fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
assertBlockExists(firstBlk);
contentSummary = fs.getContentSummary(parent);
if(fs.exists(snapshotFiles[0])) {
// Diskspace consumed should be 12 bytes * 3. [SS:1,2,3]
assertBlockNotPresent(lastBlk);
assertThat(contentSummary.getSpaceConsumed(), is(36L));
} else {
// Diskspace consumed should be 16 bytes * 3. [SS:1,2,3,4]
assertThat(contentSummary.getSpaceConsumed(), is(48L));
}
assertEquals("Number of INodes should not change",
numINodes, fsDir.getInodeMapSize());
fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
assertBlockNotPresent(firstBlk);
assertBlockNotPresent(lastBlk);
// Diskspace consumed should be 0 bytes * 3. []
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(0L));
assertNotEquals("Number of INodes should change",
numINodes, fsDir.getInodeMapSize());
}
/**
* Create three snapshots with file truncated 3 times.
* Delete snapshots in the specified order and verify that
* remaining snapshots are still readable.
*/
@Test
public void testSnapshotWithTruncates() throws IOException {
testSnapshotWithTruncates(0, 1, 2);
testSnapshotWithTruncates(0, 2, 1);
testSnapshotWithTruncates(1, 0, 2);
testSnapshotWithTruncates(1, 2, 0);
testSnapshotWithTruncates(2, 0, 1);
testSnapshotWithTruncates(2, 1, 0);
}
void testSnapshotWithTruncates(int ... deleteOrder) throws IOException {
fs.mkdirs(parent);
fs.setQuota(parent, 100, 1000);
fs.allowSnapshot(parent);
String truncateFile = "testSnapshotWithTruncates";
final Path src = new Path(parent, truncateFile);
int[] length = new int[3];
length[0] = 3 * BLOCK_SIZE;
DFSTestUtil.createFile(fs, src, 64, length[0], BLOCK_SIZE, REPLICATION, 0L);
Block firstBlk = getLocatedBlocks(src).get(0).getBlock().getLocalBlock();
Block lastBlk = getLocatedBlocks(src).getLastLocatedBlock()
.getBlock().getLocalBlock();
Path[] snapshotFiles = new Path[3];
// Diskspace consumed should be 12 bytes * 3. [blk 1,2,3]
ContentSummary contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(36L));
// Add file to snapshot and append
String[] ss = new String[] {"ss0", "ss1", "ss2"};
Path snapshotDir = fs.createSnapshot(parent, ss[0]);
snapshotFiles[0] = new Path(snapshotDir, truncateFile);
length[1] = 2 * BLOCK_SIZE;
boolean isReady = fs.truncate(src, 2 * BLOCK_SIZE);
assertTrue("Recovery is not expected.", isReady);
// Diskspace consumed should be 12 bytes * 3. [blk 1,2 SS:3]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(36L));
snapshotDir = fs.createSnapshot(parent, ss[1]);
snapshotFiles[1] = new Path(snapshotDir, truncateFile);
// Create another snapshot with truncate
length[2] = BLOCK_SIZE + BLOCK_SIZE / 2;
isReady = fs.truncate(src, BLOCK_SIZE + BLOCK_SIZE / 2);
assertFalse("Recovery is expected.", isReady);
checkBlockRecovery(src);
snapshotDir = fs.createSnapshot(parent, ss[2]);
snapshotFiles[2] = new Path(snapshotDir, truncateFile);
assertFileLength(snapshotFiles[0], length[0]);
assertBlockExists(lastBlk);
// Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(42L));
fs.deleteSnapshot(parent, ss[deleteOrder[0]]);
assertFileLength(snapshotFiles[deleteOrder[1]], length[deleteOrder[1]]);
assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
assertFileLength(src, length[2]);
assertBlockExists(firstBlk);
contentSummary = fs.getContentSummary(parent);
if(fs.exists(snapshotFiles[0])) {
// Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
assertThat(contentSummary.getSpaceConsumed(), is(42L));
assertBlockExists(lastBlk);
} else {
// Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
assertThat(contentSummary.getSpaceConsumed(), is(30L));
assertBlockNotPresent(lastBlk);
}
fs.deleteSnapshot(parent, ss[deleteOrder[1]]);
assertFileLength(snapshotFiles[deleteOrder[2]], length[deleteOrder[2]]);
assertFileLength(src, length[2]);
assertBlockExists(firstBlk);
contentSummary = fs.getContentSummary(parent);
if(fs.exists(snapshotFiles[0])) {
// Diskspace consumed should be 14 bytes * 3. [blk 1,4 SS:2,3]
assertThat(contentSummary.getSpaceConsumed(), is(42L));
assertBlockExists(lastBlk);
} else if(fs.exists(snapshotFiles[1])) {
// Diskspace consumed should be 10 bytes * 3. [blk 1,4 SS:2]
assertThat(contentSummary.getSpaceConsumed(), is(30L));
assertBlockNotPresent(lastBlk);
} else {
// Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
assertThat(contentSummary.getSpaceConsumed(), is(18L));
assertBlockNotPresent(lastBlk);
}
fs.deleteSnapshot(parent, ss[deleteOrder[2]]);
assertFileLength(src, length[2]);
assertBlockExists(firstBlk);
// Diskspace consumed should be 6 bytes * 3. [blk 1,4 SS:]
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(18L));
assertThat(contentSummary.getLength(), is(6L));
fs.delete(src, false);
assertBlockNotPresent(firstBlk);
// Diskspace consumed should be 0 bytes * 3. []
contentSummary = fs.getContentSummary(parent);
assertThat(contentSummary.getSpaceConsumed(), is(0L));
}
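/*
 * Editorial sketch: the failure test below relies on the NameNode taking
 * over an expired lease. Assuming LOW_SOFTLIMIT / LOW_HARDLIMIT shrink the
 * lease periods (they are installed via setLeasePeriod below), the flow is
 * roughly:
 *
 *   fs.truncate(p, newLength);   // returns false: all DNs are down
 *   // soft limit expires -> the NN reassigns the lease to itself, i.e.
 *   // the holder becomes HdfsServerConstants.NAMENODE_LEASE_HOLDER
 *   // DNs restart -> block recovery completes and the file is closed
 */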
/**
* Failure / recovery test for truncate.
* In this failure the DNs fail to recover the blocks and the NN triggers
* lease recovery.
* File stays in RecoveryInProgress until DataNodes report recovery.
*/
@Test
public void testTruncateFailure() throws IOException {
int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
int toTruncate = 1;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path dir = new Path("/dir");
final Path p = new Path(dir, "testTruncateFailure");
{
FSDataOutputStream out = fs.create(p, false, BLOCK_SIZE, REPLICATION,
BLOCK_SIZE);
out.write(contents, 0, startingFileSize);
try {
fs.truncate(p, 0);
fail("Truncate must fail on open file.");
} catch (IOException expected) {
GenericTestUtils.assertExceptionContains(
"Failed to TRUNCATE_FILE", expected);
} finally {
out.close();
}
}
{
FSDataOutputStream out = fs.append(p);
try {
fs.truncate(p, 0);
fail("Truncate must fail for append.");
} catch (IOException expected) {
GenericTestUtils.assertExceptionContains(
"Failed to TRUNCATE_FILE", expected);
} finally {
out.close();
}
}
try {
fs.truncate(p, -1);
fail("Truncate must fail for a negative new length.");
} catch (HadoopIllegalArgumentException expected) {
GenericTestUtils.assertExceptionContains(
"Cannot truncate to a negative file size", expected);
}
try {
fs.truncate(p, startingFileSize + 1);
fail("Truncate must fail for a larger new length.");
} catch (Exception expected) {
GenericTestUtils.assertExceptionContains(
"Cannot truncate to a larger file size", expected);
}
try {
fs.truncate(dir, 0);
fail("Truncate must fail for a directory.");
} catch (Exception expected) {
GenericTestUtils.assertExceptionContains(
"Path is not a file", expected);
}
try {
fs.truncate(new Path(dir, "non-existing"), 0);
fail("Truncate must fail for a non-existing file.");
} catch (Exception expected) {
GenericTestUtils.assertExceptionContains(
"File does not exist", expected);
}
fs.setPermission(p, FsPermission.createImmutable((short)0664));
{
final UserGroupInformation fooUgi =
UserGroupInformation.createUserForTesting("foo", new String[]{"foo"});
try {
final FileSystem foofs = DFSTestUtil.getFileSystemAs(fooUgi, conf);
foofs.truncate(p, 0);
fail("Truncate must fail for no WRITE permission.");
} catch (Exception expected) {
GenericTestUtils.assertExceptionContains(
"Permission denied", expected);
}
}
cluster.shutdownDataNodes();
NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
.setLeasePeriod(LOW_SOFTLIMIT, LOW_HARDLIMIT);
int newLength = startingFileSize - toTruncate;
boolean isReady = fs.truncate(p, newLength);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
{
try {
fs.truncate(p, 0);
fail("Truncate must fail since a trancate is already in pregress.");
} catch (IOException expected) {
GenericTestUtils.assertExceptionContains(
"Failed to TRUNCATE_FILE", expected);
}
}
boolean recoveryTriggered = false;
for(int i = 0; i < RECOVERY_ATTEMPTS; i++) {
String leaseHolder =
NameNodeAdapter.getLeaseHolderForPath(cluster.getNameNode(),
p.toUri().getPath());
if(leaseHolder.equals(HdfsServerConstants.NAMENODE_LEASE_HOLDER)) {
recoveryTriggered = true;
break;
}
try { Thread.sleep(SLEEP); } catch (InterruptedException ignored) {}
}
assertThat("lease recovery should have occurred in ~" +
SLEEP * RECOVERY_ATTEMPTS + " ms.", recoveryTriggered, is(true));
cluster.startDataNodes(conf, DATANODE_NUM, true,
StartupOption.REGULAR, null);
cluster.waitActive();
checkBlockRecovery(p);
NameNodeAdapter.getLeaseManager(cluster.getNamesystem())
.setLeasePeriod(HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
checkFullFile(p, newLength, contents);
fs.delete(p, false);
}
/**
* The last block is truncated in the middle. (non copy-on-truncate)
* dn0 is shut down before the truncate and restarted after the truncate succeeds.
*/
@Test(timeout=60000)
public void testTruncateWithDataNodesRestart() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path p = new Path(parent, "testTruncateWithDataNodesRestart");
writeContents(contents, startingFileSize, p);
LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
int dn = 0;
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
cluster.getDataNodes().get(dn).shutdown();
try {
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
} finally {
cluster.restartDataNode(dn, true, true);
cluster.waitActive();
}
checkBlockRecovery(p);
LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
/*
* For non copy-on-truncate, the truncated block id is the same, but the
* GS should increase.
* The truncated block will be replicated to dn0 after it restarts.
*/
assertEquals(newBlock.getBlock().getBlockId(),
oldBlock.getBlock().getBlockId());
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
// Wait for the replica count to reach 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// Old replica is disregarded and replaced with the truncated one
assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
newBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn,
newBlock.getBlock()).getName().endsWith(
newBlock.getBlock().getGenerationStamp() + ".meta"));
// Validate the file
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
checkFullFile(p, newLength, contents);
fs.delete(parent, true);
}
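/*
 * Editorial note: contrast with the copy-on-truncate test below. In-place
 * truncate keeps the block id and bumps the generation stamp by one, while
 * copy-on-truncate (e.g. when the last block is captured by a snapshot)
 * allocates a fresh block id. The getBlockId()/getGenerationStamp()
 * assertions in the two tests encode exactly this difference.
 */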
/**
* The last block is truncated in the middle. (copy-on-truncate)
* dn1 is shut down before the truncate and restarted after the truncate succeeds.
*/
@Test(timeout=60000)
public void testCopyOnTruncateWithDataNodesRestart() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path p = new Path(parent, "testCopyOnTruncateWithDataNodesRestart");
writeContents(contents, startingFileSize, p);
LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
fs.allowSnapshot(parent);
fs.createSnapshot(parent, "ss0");
int dn = 1;
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
cluster.getDataNodes().get(dn).shutdown();
try {
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
} finally {
cluster.restartDataNode(dn, true, true);
cluster.waitActive();
}
checkBlockRecovery(p);
LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
/*
* For copy-on-truncate, a new block is created with a new block id and GS.
* The new block starts with 2 replicas and is then replicated to dn1.
*/
assertNotEquals(newBlock.getBlock().getBlockId(),
oldBlock.getBlock().getBlockId());
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
// Wait for the replica count to reach 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// New block is replicated to dn1
assertEquals(cluster.getBlockFile(dn, newBlock.getBlock()).length(),
newBlock.getBlockSize());
// Old replica exists too since there is snapshot
assertEquals(cluster.getBlockFile(dn, oldBlock.getBlock()).length(),
oldBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn,
oldBlock.getBlock()).getName().endsWith(
oldBlock.getBlock().getGenerationStamp() + ".meta"));
// Validate the file
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
checkFullFile(p, newLength, contents);
fs.deleteSnapshot(parent, "ss0");
fs.delete(parent, true);
}
/**
* The last block is truncated in the middle. (non copy-on-truncate)
* dn0 and dn1 are restarted immediately after the truncate.
*/
@Test(timeout=60000)
public void testTruncateWithDataNodesRestartImmediately() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path p = new Path(parent, "testTruncateWithDataNodesRestartImmediately");
writeContents(contents, startingFileSize, p);
LocatedBlock oldBlock = getLocatedBlocks(p).getLastLocatedBlock();
int dn0 = 0;
int dn1 = 1;
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
cluster.restartDataNode(dn0, true, true);
cluster.restartDataNode(dn1, true, true);
cluster.waitActive();
checkBlockRecovery(p);
LocatedBlock newBlock = getLocatedBlocks(p).getLastLocatedBlock();
/*
* For non copy-on-truncate, the truncated block id is the same, but the
* GS should increase.
*/
assertEquals(newBlock.getBlock().getBlockId(),
oldBlock.getBlock().getBlockId());
assertEquals(newBlock.getBlock().getGenerationStamp(),
oldBlock.getBlock().getGenerationStamp() + 1);
Thread.sleep(2000);
// trigger a second block report to delete the corrupted replica, if there is one
cluster.triggerBlockReports();
// Wait for the replica count to reach 3
DFSTestUtil.waitReplication(fs, p, REPLICATION);
// Old replica is disregarded and replaced with the truncated one on dn0
assertEquals(cluster.getBlockFile(dn0, newBlock.getBlock()).length(),
newBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn0,
newBlock.getBlock()).getName().endsWith(
newBlock.getBlock().getGenerationStamp() + ".meta"));
// Old replica is disregarded and replaced with the truncated one on dn1
assertEquals(cluster.getBlockFile(dn1, newBlock.getBlock()).length(),
newBlock.getBlockSize());
assertTrue(cluster.getBlockMetadataFile(dn1,
newBlock.getBlock()).getName().endsWith(
newBlock.getBlock().getGenerationStamp() + ".meta"));
// Validate the file
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLength));
checkFullFile(p, newLength, contents);
fs.delete(parent, true);
}
/**
* The last block is truncated in the middle. (non copy-on-truncate)
* All DataNodes are shut down immediately after the truncate.
*/
@Test(timeout=60000)
public void testTruncateWithDataNodesShutdownImmediately() throws Exception {
int startingFileSize = 3 * BLOCK_SIZE;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
final Path p = new Path(parent, "testTruncateWithDataNodesShutdownImmediately");
writeContents(contents, startingFileSize, p);
int toTruncateLength = 1;
int newLength = startingFileSize - toTruncateLength;
boolean isReady = fs.truncate(p, newLength);
assertFalse(isReady);
cluster.shutdownDataNodes();
cluster.setDataNodesDead();
try {
for(int i = 0; i < SUCCESS_ATTEMPTS && cluster.isDataNodeUp(); i++) {
Thread.sleep(SLEEP);
}
assertFalse("All DataNodes should be down.", cluster.isDataNodeUp());
LocatedBlocks blocks = getLocatedBlocks(p);
assertTrue(blocks.isUnderConstruction());
} finally {
cluster.startDataNodes(conf, DATANODE_NUM, true,
StartupOption.REGULAR, null);
cluster.waitActive();
}
checkBlockRecovery(p);
fs.delete(parent, true);
}
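/*
 * Editorial note: with every DataNode down, the truncated file above stays
 * under construction on the NameNode; recovery can only finish once
 * DataNodes re-register and act on the recovery command, which is what the
 * final checkBlockRecovery(p) waits for.
 */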
/**
* EditLogOp load test for Truncate.
*/
@Test
public void testTruncateEditLogLoad() throws IOException {
// purge previously accumulated edits
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
int startingFileSize = 2 * BLOCK_SIZE + BLOCK_SIZE / 2;
int toTruncate = 1;
final String s = "/testTruncateEditLogLoad";
final Path p = new Path(s);
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
writeContents(contents, startingFileSize, p);
int newLength = startingFileSize - toTruncate;
boolean isReady = fs.truncate(p, newLength);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
cluster.restartNameNode();
String holder = UserGroupInformation.getCurrentUser().getUserName();
cluster.getNamesystem().recoverLease(s, holder, "");
checkBlockRecovery(p);
checkFullFile(p, newLength, contents);
fs.delete(p, false);
}
/**
* Upgrade, RollBack, and restart test for Truncate.
*/
@Test
public void testUpgradeAndRestart() throws IOException {
fs.mkdirs(parent);
fs.setQuota(parent, 100, 1000);
fs.allowSnapshot(parent);
String truncateFile = "testUpgrade";
final Path p = new Path(parent, truncateFile);
int startingFileSize = 2 * BLOCK_SIZE;
int toTruncate = 1;
byte[] contents = AppendTestUtil.initBuffer(startingFileSize);
writeContents(contents, startingFileSize, p);
Path snapshotDir = fs.createSnapshot(parent, "ss0");
Path snapshotFile = new Path(snapshotDir, truncateFile);
int newLengthBeforeUpgrade = startingFileSize - toTruncate;
boolean isReady = fs.truncate(p, newLengthBeforeUpgrade);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
checkBlockRecovery(p);
checkFullFile(p, newLengthBeforeUpgrade, contents);
assertFileLength(snapshotFile, startingFileSize);
long totalBlockBefore = cluster.getNamesystem().getBlocksTotal();
restartCluster(StartupOption.UPGRADE);
assertThat("SafeMode should be OFF",
cluster.getNamesystem().isInSafeMode(), is(false));
assertThat("NameNode should be performing upgrade.",
cluster.getNamesystem().isUpgradeFinalized(), is(false));
FileStatus fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
int newLengthAfterUpgrade = newLengthBeforeUpgrade - toTruncate;
Block oldBlk = getLocatedBlocks(p).getLastLocatedBlock()
.getBlock().getLocalBlock();
isReady = fs.truncate(p, newLengthAfterUpgrade);
assertThat("truncate should have triggered block recovery.",
isReady, is(false));
fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLengthAfterUpgrade));
assertThat("Should copy on truncate during upgrade",
getLocatedBlocks(p).getLastLocatedBlock().getBlock()
.getLocalBlock().getBlockId(), is(not(equalTo(oldBlk.getBlockId()))));
checkBlockRecovery(p);
checkFullFile(p, newLengthAfterUpgrade, contents);
assertThat("Total block count should be unchanged from copy-on-truncate",
cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
restartCluster(StartupOption.ROLLBACK);
assertThat("File does not exist " + p, fs.exists(p), is(true));
fileStatus = fs.getFileStatus(p);
assertThat(fileStatus.getLen(), is((long) newLengthBeforeUpgrade));
checkFullFile(p, newLengthBeforeUpgrade, contents);
assertThat("Total block count should be unchanged from rolling back",
cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
restartCluster(StartupOption.REGULAR);
assertThat("Total block count should be unchanged from start-up",
cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
checkFullFile(p, newLengthBeforeUpgrade, contents);
assertFileLength(snapshotFile, startingFileSize);
// empty edits and restart
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
cluster.restartNameNode(true);
assertThat("Total block count should be unchanged from start-up",
cluster.getNamesystem().getBlocksTotal(), is(totalBlockBefore));
checkFullFile(p, newLengthBeforeUpgrade, contents);
assertFileLength(snapshotFile, startingFileSize);
fs.deleteSnapshot(parent, "ss0");
fs.delete(parent, true);
assertThat("File " + p + " shouldn't exist", fs.exists(p), is(false));
}
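/*
 * Editorial note: during an upgrade the pre-upgrade block must survive so
 * that a ROLLBACK can restore the old file contents, which is why the test
 * above asserts copy-on-truncate (a new block id) while upgrading and an
 * unchanged total block count after rolling back.
 */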
/**
* Check truncate recovery.
*/
@Test
public void testTruncateRecovery() throws IOException {
FSNamesystem fsn = cluster.getNamesystem();
String client = "client";
String clientMachine = "clientMachine";
String src = "/test/testTruncateRecovery";
Path srcPath = new Path(src);
byte[] contents = AppendTestUtil.initBuffer(BLOCK_SIZE);
writeContents(contents, BLOCK_SIZE, srcPath);
INodesInPath iip = fsn.getFSDirectory().getINodesInPath4Write(src, true);
INodeFile file = iip.getLastINode().asFile();
long initialGenStamp = file.getLastBlock().getGenerationStamp();
// Test that prepareFileForTruncate sets up in-place truncate.
fsn.writeLock();
try {
Block oldBlock = file.getLastBlock();
Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
client, clientMachine, 1, null);
// In-place truncate uses old block id with new genStamp.
assertThat(truncateBlock.getBlockId(),
is(equalTo(oldBlock.getBlockId())));
assertThat(truncateBlock.getNumBytes(),
is(oldBlock.getNumBytes()));
assertThat(truncateBlock.getGenerationStamp(),
is(fsn.getBlockIdManager().getGenerationStampV2()));
assertThat(file.getLastBlock().getBlockUCState(),
is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock())
.getBlockRecoveryId();
assertThat(blockRecoveryId, is(initialGenStamp + 1));
fsn.getEditLog().logTruncate(
src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
} finally {
fsn.writeUnlock();
}
// Re-create file and ensure we are ready to copy on truncate
writeContents(contents, BLOCK_SIZE, srcPath);
fs.allowSnapshot(parent);
fs.createSnapshot(parent, "ss0");
iip = fsn.getFSDirectory().getINodesInPath(src, true);
file = iip.getLastINode().asFile();
file.recordModification(iip.getLatestSnapshotId(), true);
assertThat(file.isBlockInLatestSnapshot(file.getLastBlock()), is(true));
initialGenStamp = file.getLastBlock().getGenerationStamp();
// Test that prepareFileForTruncate sets up copy-on-write truncate
fsn.writeLock();
try {
Block oldBlock = file.getLastBlock();
Block truncateBlock = FSDirTruncateOp.prepareFileForTruncate(fsn, iip,
client, clientMachine, 1, null);
// Copy-on-write truncate makes new block with new id and genStamp
assertThat(truncateBlock.getBlockId(),
is(not(equalTo(oldBlock.getBlockId()))));
assertThat(truncateBlock.getNumBytes() < oldBlock.getNumBytes(),
is(true));
assertThat(truncateBlock.getGenerationStamp(),
is(fsn.getBlockIdManager().getGenerationStampV2()));
assertThat(file.getLastBlock().getBlockUCState(),
is(HdfsServerConstants.BlockUCState.UNDER_RECOVERY));
long blockRecoveryId = ((BlockInfoContiguousUnderConstruction) file.getLastBlock())
.getBlockRecoveryId();
assertThat(blockRecoveryId, is(initialGenStamp + 1));
fsn.getEditLog().logTruncate(
src, client, clientMachine, BLOCK_SIZE-1, Time.now(), truncateBlock);
} finally {
fsn.writeUnlock();
}
checkBlockRecovery(srcPath);
fs.deleteSnapshot(parent, "ss0");
fs.delete(parent, true);
}
@Test
public void testTruncateShellCommand() throws Exception {
final Path src = new Path("/test/testTruncateShellCommand");
final int oldLength = 2 * BLOCK_SIZE + 1;
final int newLength = BLOCK_SIZE + 1;
String[] argv =
new String[]{"-truncate", String.valueOf(newLength), src.toString()};
runTruncateShellCommand(src, oldLength, argv);
// wait for block recovery
checkBlockRecovery(src);
assertThat(fs.getFileStatus(src).getLen(), is((long) newLength));
fs.delete(parent, true);
}
@Test
public void testTruncateShellCommandOnBlockBoundary() throws Exception {
final Path src = new Path("/test/testTruncateShellCommandOnBoundary");
final int oldLength = 2 * BLOCK_SIZE;
final int newLength = BLOCK_SIZE;
String[] argv =
new String[]{"-truncate", String.valueOf(newLength), src.toString()};
runTruncateShellCommand(src, oldLength, argv);
// shouldn't need to wait for block recovery
assertThat(fs.getFileStatus(src).getLen(), is((long) newLength));
fs.delete(parent, true);
}
@Test
public void testTruncateShellCommandWithWaitOption() throws Exception {
final Path src = new Path("/test/testTruncateShellCommandWithWaitOption");
final int oldLength = 2 * BLOCK_SIZE + 1;
final int newLength = BLOCK_SIZE + 1;
String[] argv = new String[]{"-truncate", "-w", String.valueOf(newLength),
src.toString()};
runTruncateShellCommand(src, oldLength, argv);
// shouldn't need to wait for block recovery
assertThat(fs.getFileStatus(src).getLen(), is((long) newLength));
fs.delete(parent, true);
}
private void runTruncateShellCommand(Path src, int oldLength,
String[] shellOpts) throws Exception {
// create file and write data
writeContents(AppendTestUtil.initBuffer(oldLength), oldLength, src);
assertThat(fs.getFileStatus(src).getLen(), is((long)oldLength));
// truncate file using shell
FsShell shell = null;
try {
shell = new FsShell(conf);
assertThat(ToolRunner.run(shell, shellOpts), is(0));
} finally {
if(shell != null) {
shell.close();
}
}
}
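/*
 * Editorial sketch: the shell tests above drive the equivalent of
 *
 *   hdfs dfs -truncate [-w] <length> <path>
 *
 * where -w waits for block recovery before returning. Without -w, a
 * mid-block truncate may return while recovery is still in progress, which
 * is why testTruncateShellCommand polls via checkBlockRecovery().
 */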
@Test
public void testTruncate4Symlink() throws IOException {
final int fileLength = 3 * BLOCK_SIZE;
fs.mkdirs(parent);
final byte[] contents = AppendTestUtil.initBuffer(fileLength);
final Path file = new Path(parent, "testTruncate4Symlink");
writeContents(contents, fileLength, file);
final Path link = new Path(parent, "link");
fs.createSymlink(file, link, false);
final int newLength = fileLength/3;
boolean isReady = fs.truncate(link, newLength);
assertTrue("Recovery is not expected.", isReady);
FileStatus fileStatus = fs.getFileStatus(file);
assertThat(fileStatus.getLen(), is((long) newLength));
ContentSummary cs = fs.getContentSummary(parent);
assertEquals("Bad disk space usage",
cs.getSpaceConsumed(), newLength * REPLICATION);
// validate the file content
checkFullFile(file, newLength, contents);
fs.delete(parent, true);
}
static void writeContents(byte[] contents, int fileLength, Path p)
throws IOException {
FSDataOutputStream out = fs.create(p, true, BLOCK_SIZE, REPLICATION,
BLOCK_SIZE);
out.write(contents, 0, fileLength);
out.close();
}
static void checkBlockRecovery(Path p) throws IOException {
checkBlockRecovery(p, fs);
}
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs)
throws IOException {
checkBlockRecovery(p, dfs, SUCCESS_ATTEMPTS, SLEEP);
}
public static void checkBlockRecovery(Path p, DistributedFileSystem dfs,
int attempts, long sleepMs) throws IOException {
boolean success = false;
for(int i = 0; i < attempts; i++) {
LocatedBlocks blocks = getLocatedBlocks(p, dfs);
boolean noLastBlock = blocks.getLastLocatedBlock() == null;
if(!blocks.isUnderConstruction() &&
(noLastBlock || blocks.isLastBlockComplete())) {
success = true;
break;
}
try { Thread.sleep(sleepMs); } catch (InterruptedException ignored) {}
}
assertThat("inode should complete in ~" + sleepMs * attempts + " ms.",
success, is(true));
}
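/*
 * Editorial note: the overloads above are public static, presumably so
 * other suites can reuse them, e.g.
 *
 *   checkBlockRecovery(path, dfs);   // default attempts and sleep
 *
 * polling until the file is no longer under construction and its last
 * block is complete.
 */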
static LocatedBlocks getLocatedBlocks(Path src) throws IOException {
return getLocatedBlocks(src, fs);
}
static LocatedBlocks getLocatedBlocks(Path src, DistributedFileSystem dfs)
throws IOException {
return dfs.getClient().getLocatedBlocks(src.toString(), 0, Long.MAX_VALUE);
}
static void assertBlockExists(Block blk) {
assertNotNull("BlocksMap does not contain block: " + blk,
cluster.getNamesystem().getStoredBlock(blk));
}
static void assertBlockNotPresent(Block blk) {
assertNull("BlocksMap should not contain block: " + blk,
cluster.getNamesystem().getStoredBlock(blk));
}
static void assertFileLength(Path file, long length) throws IOException {
byte[] data = DFSTestUtil.readFileBuffer(fs, file);
assertEquals("Wrong data size in snapshot.", length, data.length);
}
static void checkFullFile(Path p, int newLength, byte[] contents)
throws IOException {
AppendTestUtil.checkFullFile(fs, p, newLength, contents, p.toString());
}
static void restartCluster(StartupOption o)
throws IOException {
cluster.shutdown();
if(StartupOption.ROLLBACK == o)
NameNode.doRollback(conf, false);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM)
.format(false)
.nameNodePort(NameNode.DEFAULT_PORT)
.startupOption(o==StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
.dnStartupOption(o!=StartupOption.ROLLBACK ? StartupOption.REGULAR : o)
.build();
fs = cluster.getFileSystem();
}
}
| 46,984
| 37.044534
| 90
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import java.lang.management.ManagementFactory;
import java.io.File;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.junit.Test;
/**
* DFS_HOSTS and DFS_HOSTS_EXCLUDE tests
*
*/
public class TestHostsFiles {
private static final Log LOG =
LogFactory.getLog(TestHostsFiles.class.getName());
/*
* Return a configuration object with low timeouts for testing and
* a topology script set (which enables rack awareness).
*/
private Configuration getConf() {
Configuration conf = new HdfsConfiguration();
// Lower the heartbeat interval so the NN quickly learns of dead
// or decommissioned DNs and the NN issues replication and invalidation
// commands quickly (as replies to heartbeats)
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
// Have the NN ReplicationMonitor compute the replication and
// invalidation commands to send DNs every second.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
// Have the NN check for pending replications every second so it
// quickly schedules additional replicas as they are identified.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 1);
// The DNs report blocks every second.
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
// Indicates we have multiple racks
conf.set(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY, "xyz");
return conf;
}
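/*
 * Editorial sketch: dfs.hosts and dfs.hosts.exclude point at plain text
 * files with one host per line. Edits are picked up without a restart by
 * refreshing the node list, which the tests below do roughly as:
 *
 *   DFSTestUtil.writeFile(localFileSys, excludeFile, "host:port");
 *   ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
 */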
@Test
public void testHostsExcludeInUI() throws Exception {
Configuration conf = getConf();
short REPLICATION_FACTOR = 2;
final Path filePath = new Path("/testFile");
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, "");
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
// Two blocks and four racks
String racks[] = {"/rack1", "/rack1", "/rack2", "/rack2"};
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(racks.length).racks(racks).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
try {
// Create a file with one block
final FileSystem fs = cluster.getFileSystem();
DFSTestUtil.createFile(fs, filePath, 1L, REPLICATION_FACTOR, 1L);
ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, filePath);
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
// Decommission one of the hosts with the block, this should cause
// the block to get replicated to another host on the same rack,
// otherwise the rack policy is violated.
BlockLocation locs[] = fs.getFileBlockLocations(
fs.getFileStatus(filePath), 0, Long.MAX_VALUE);
String name = locs[0].getNames()[0];
String names = name + "\n" + "localhost:42\n";
LOG.info("adding '" + names + "' to exclude file " + excludeFile.toUri().getPath());
DFSTestUtil.writeFile(localFileSys, excludeFile, name);
ns.getBlockManager().getDatanodeManager().refreshNodes(conf);
DFSTestUtil.waitForDecommission(fs, name);
// Check the block still has sufficient # replicas across racks
DFSTestUtil.waitForReplication(cluster, b, 2, REPLICATION_FACTOR, 0);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName =
new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo");
String nodes = (String) mbs.getAttribute(mxbeanName, "LiveNodes");
assertTrue("Live nodes should contain the decommissioned node",
nodes.contains("Decommissioned"));
} finally {
if (cluster != null) {
cluster.shutdown();
}
if (localFileSys.exists(dir)) {
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
}
}
}
@Test
public void testHostsIncludeForDeadCount() throws Exception {
Configuration conf = getConf();
// Configure an excludes file
FileSystem localFileSys = FileSystem.getLocal(conf);
Path workingDir = localFileSys.getWorkingDirectory();
Path dir = new Path(workingDir, "build/test/data/temp/decommission");
Path excludeFile = new Path(dir, "exclude");
Path includeFile = new Path(dir, "include");
assertTrue(localFileSys.mkdirs(dir));
StringBuilder includeHosts = new StringBuilder();
includeHosts.append("localhost:52").append("\n").append("127.0.0.1:7777")
.append("\n");
DFSTestUtil.writeFile(localFileSys, excludeFile, "");
DFSTestUtil.writeFile(localFileSys, includeFile, includeHosts.toString());
conf.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, excludeFile.toUri().getPath());
conf.set(DFSConfigKeys.DFS_HOSTS, includeFile.toUri().getPath());
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
assertTrue(ns.getNumDeadDataNodes() == 2);
assertTrue(ns.getNumLiveDataNodes() == 0);
// Testing using MBeans
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
if (localFileSys.exists(dir)) {
FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
}
}
}
}
| 7,570
| 40.371585
| 90
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.DF;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
/**
* This tests InterDataNodeProtocol for block handling.
*/
public class TestNamenodeCapacityReport {
private static final Log LOG = LogFactory.getLog(TestNamenodeCapacityReport.class);
/**
* The following test first creates a file.
* It verifies the block information from a datanode.
* Then, it updates the block with new information and verifies again.
*/
@Test
public void testVolumeSize() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
// Set aside fifth of the total capacity as reserved
long reserved = 10000;
conf.setLong(DFSConfigKeys.DFS_DATANODE_DU_RESERVED_KEY, reserved);
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
final DatanodeManager dm = cluster.getNamesystem().getBlockManager(
).getDatanodeManager();
// Ensure the data reported for each data node is right
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, dead, false);
assertTrue(live.size() == 1);
long used, remaining, configCapacity, nonDFSUsed, bpUsed;
float percentUsed, percentRemaining, percentBpUsed;
for (final DatanodeDescriptor datanode : live) {
used = datanode.getDfsUsed();
remaining = datanode.getRemaining();
nonDFSUsed = datanode.getNonDfsUsed();
configCapacity = datanode.getCapacity();
percentUsed = datanode.getDfsUsedPercent();
percentRemaining = datanode.getRemainingPercent();
bpUsed = datanode.getBlockPoolUsed();
percentBpUsed = datanode.getBlockPoolUsedPercent();
LOG.info("Datanode configCapacity " + configCapacity
+ " used " + used + " non DFS used " + nonDFSUsed
+ " remaining " + remaining + " perentUsed " + percentUsed
+ " percentRemaining " + percentRemaining);
assertTrue(configCapacity == (used + remaining + nonDFSUsed));
assertTrue(percentUsed == DFSUtilClient.getPercentUsed(used,
configCapacity));
assertTrue(percentRemaining == DFSUtilClient.getPercentRemaining(
remaining, configCapacity));
assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
configCapacity));
}
DF df = new DF(new File(cluster.getDataDirectory()), conf);
//
// Currently the DataNode creates two data directories in the
// MiniDFSCluster, and each data directory reports the capacity of the
// disk it resides on. Hence the capacity reported by the DataNode is
// twice the disk capacity.
//
// So multiply the disk capacity and reserved space by two
// to account for this.
//
int numOfDataDirs = 2;
long diskCapacity = numOfDataDirs * df.getCapacity();
reserved *= numOfDataDirs;
configCapacity = namesystem.getCapacityTotal();
used = namesystem.getCapacityUsed();
nonDFSUsed = namesystem.getNonDfsUsedSpace();
remaining = namesystem.getCapacityRemaining();
percentUsed = namesystem.getPercentUsed();
percentRemaining = namesystem.getPercentRemaining();
bpUsed = namesystem.getBlockPoolUsedSpace();
percentBpUsed = namesystem.getPercentBlockPoolUsed();
LOG.info("Data node directory " + cluster.getDataDirectory());
LOG.info("Name node diskCapacity " + diskCapacity + " configCapacity "
+ configCapacity + " reserved " + reserved + " used " + used
+ " remaining " + remaining + " nonDFSUsed " + nonDFSUsed
+ " remaining " + remaining + " percentUsed " + percentUsed
+ " percentRemaining " + percentRemaining + " bpUsed " + bpUsed
+ " percentBpUsed " + percentBpUsed);
// Ensure new total capacity reported excludes the reserved space
assertTrue(configCapacity == diskCapacity - reserved);
// Ensure the total capacity is the sum of used, remaining and non-DFS used space
assertTrue(configCapacity == (used + remaining + nonDFSUsed));
// Ensure percent used is calculated based on used and present capacity
assertTrue(percentUsed == DFSUtilClient.getPercentUsed(used,
configCapacity));
// Ensure percent block pool used is calculated based on bpUsed and present capacity
assertTrue(percentBpUsed == DFSUtilClient.getPercentUsed(bpUsed,
configCapacity));
// Ensure percent remaining is calculated based on remaining and present capacity
assertTrue(percentRemaining == ((float)remaining * 100.0f)/(float)configCapacity);
}
finally {
if (cluster != null) {cluster.shutdown();}
}
}
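/*
 * Editorial note: the invariant checked above, both per DataNode and for
 * the cluster totals, is
 *
 *   configCapacity == used + remaining + nonDFSUsed
 *
 * with configCapacity == rawDiskCapacity - reserved, where reserved comes
 * from dfs.datanode.du.reserved.
 */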
private static final float EPSILON = 0.0001f;
@Test
public void testXceiverCount() throws Exception {
Configuration conf = new HdfsConfiguration();
// retry one time, if close fails
conf.setInt(HdfsClientConfigKeys.BlockWrite.LOCATEFOLLOWINGBLOCK_RETRIES_KEY, 1);
MiniDFSCluster cluster = null;
final int nodes = 8;
final int fileCount = 5;
final short fileRepl = 3;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(nodes).build();
cluster.waitActive();
final FSNamesystem namesystem = cluster.getNamesystem();
final DatanodeManager dnm = namesystem.getBlockManager().getDatanodeManager();
List<DataNode> datanodes = cluster.getDataNodes();
final DistributedFileSystem fs = cluster.getFileSystem();
// trigger heartbeats in case not already sent
triggerHeartbeats(datanodes);
// check that all nodes are live and in service
int expectedTotalLoad = nodes; // xceiver server adds 1 to load
int expectedInServiceNodes = nodes;
int expectedInServiceLoad = nodes;
checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
// shutdown half the nodes and force a heartbeat check to ensure
// counts are accurate
for (int i=0; i < nodes/2; i++) {
DataNode dn = datanodes.get(i);
DatanodeDescriptor dnd = dnm.getDatanode(dn.getDatanodeId());
dn.shutdown();
DFSTestUtil.setDatanodeDead(dnd);
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
//Verify decommission of dead node won't impact nodesInService metrics.
dnm.getDecomManager().startDecommission(dnd);
expectedInServiceNodes--;
assertEquals(expectedInServiceNodes, namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
//Verify recommission of dead node won't impact nodesInService metrics.
dnm.getDecomManager().stopDecommission(dnd);
assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
}
// restart the nodes to verify that counts are correct after
// node re-registration
cluster.restartDataNodes();
cluster.waitActive();
datanodes = cluster.getDataNodes();
expectedInServiceNodes = nodes;
assertEquals(nodes, datanodes.size());
checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
// create streams and hsync to force datastreamers to start
DFSOutputStream[] streams = new DFSOutputStream[fileCount];
for (int i=0; i < fileCount; i++) {
streams[i] = (DFSOutputStream)fs.create(new Path("/f"+i), fileRepl)
.getWrappedStream();
streams[i].write("1".getBytes());
streams[i].hsync();
// the load for writers is 2 because both the write xceiver & packet
// responder threads are counted in the load
expectedTotalLoad += 2*fileRepl;
expectedInServiceLoad += 2*fileRepl;
}
// force nodes to send load update
triggerHeartbeats(datanodes);
checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
// decommission a few nodes, subtract their load from the expected load,
// trigger heartbeat to force load update
for (int i=0; i < fileRepl; i++) {
expectedInServiceNodes--;
DatanodeDescriptor dnd =
dnm.getDatanode(datanodes.get(i).getDatanodeId());
expectedInServiceLoad -= dnd.getXceiverCount();
dnm.getDecomManager().startDecommission(dnd);
DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
Thread.sleep(100);
checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
}
// check expected load while closing each stream. recalc expected
// load based on whether the nodes in the pipeline are decommissioned
for (int i=0; i < fileCount; i++) {
int decomm = 0;
for (DatanodeInfo dni : streams[i].getPipeline()) {
DatanodeDescriptor dnd = dnm.getDatanode(dni);
expectedTotalLoad -= 2;
if (dnd.isDecommissionInProgress() || dnd.isDecommissioned()) {
decomm++;
} else {
expectedInServiceLoad -= 2;
}
}
try {
streams[i].close();
} catch (IOException ioe) {
// nodes will go decommissioned even if there's a UC block whose
// other locations are decommissioned too. we'll ignore that
// bug for now
if (decomm < fileRepl) {
throw ioe;
}
}
triggerHeartbeats(datanodes);
// verify node count and loads
checkClusterHealth(nodes, namesystem, expectedTotalLoad, expectedInServiceNodes, expectedInServiceLoad);
}
// shutdown each node, verify node counts based on decomm state
for (int i=0; i < nodes; i++) {
DataNode dn = datanodes.get(i);
dn.shutdown();
// force it to appear dead so live count decreases
DatanodeDescriptor dnDesc = dnm.getDatanode(dn.getDatanodeId());
DFSTestUtil.setDatanodeDead(dnDesc);
BlockManagerTestUtil.checkHeartbeat(namesystem.getBlockManager());
assertEquals(nodes-1-i, namesystem.getNumLiveDataNodes());
// first few nodes are already out of service
if (i >= fileRepl) {
expectedInServiceNodes--;
}
assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
// live nodes always report a load of at least 1; with no nodes the load is 0
double expectedXceiverAvg = (i == nodes-1) ? 0.0 : 1.0;
assertEquals((double)expectedXceiverAvg,
getInServiceXceiverAverage(namesystem), EPSILON);
}
// final sanity check
checkClusterHealth(0, namesystem, 0.0, 0, 0.0);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private static void checkClusterHealth(
int numOfLiveNodes,
FSNamesystem namesystem, double expectedTotalLoad,
int expectedInServiceNodes, double expectedInServiceLoad) {
assertEquals(numOfLiveNodes, namesystem.getNumLiveDataNodes());
assertEquals(expectedInServiceNodes, getNumDNInService(namesystem));
assertEquals(expectedTotalLoad, namesystem.getTotalLoad(), EPSILON);
if (expectedInServiceNodes != 0) {
assertEquals(expectedInServiceLoad / expectedInServiceNodes,
getInServiceXceiverAverage(namesystem), EPSILON);
} else {
assertEquals(0.0, getInServiceXceiverAverage(namesystem), EPSILON);
}
}
private static int getNumDNInService(FSNamesystem fsn) {
return fsn.getBlockManager().getDatanodeManager().getFSClusterStats()
.getNumDatanodesInService();
}
private static double getInServiceXceiverAverage(FSNamesystem fsn) {
return fsn.getBlockManager().getDatanodeManager().getFSClusterStats()
.getInServiceXceiverAverage();
}
private void triggerHeartbeats(List<DataNode> datanodes)
throws IOException, InterruptedException {
for (DataNode dn : datanodes) {
DataNodeTestUtils.triggerHeartbeat(dn);
}
Thread.sleep(100);
}
}
| 14,641
| 40.954155
| 112
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.TestFileCreation;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
public class TestBlockUnderConstruction {
static final String BASE_DIR = "/test/TestBlockUnderConstruction";
static final int BLOCK_SIZE = 8192; // same as TestFileCreation.blocksize
static final int NUM_BLOCKS = 5; // number of blocks to write
private static MiniDFSCluster cluster;
private static DistributedFileSystem hdfs;
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
@AfterClass
public static void tearDown() throws Exception {
if(hdfs != null) hdfs.close();
if(cluster != null) cluster.shutdown();
}
void writeFile(Path file, FSDataOutputStream stm, int size)
throws IOException {
long blocksBefore = stm.getPos() / BLOCK_SIZE;
TestFileCreation.writeFile(stm, size);
// need to make sure the full block is completely flushed to the DataNodes
// (see FSOutputSummer#flush)
stm.flush();
int blocksAfter = 0;
// wait until the block is allocated by DataStreamer
BlockLocation[] locatedBlocks;
while(blocksAfter <= blocksBefore) {
locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
}
}
private void verifyFileBlocks(String file,
boolean isFileOpen) throws IOException {
FSNamesystem ns = cluster.getNamesystem();
final INodeFile inode = INodeFile.valueOf(ns.dir.getINode(file), file);
assertTrue("File " + inode.toString() +
" isUnderConstruction = " + inode.isUnderConstruction() +
" expected to be " + isFileOpen,
inode.isUnderConstruction() == isFileOpen);
BlockInfo[] blocks = inode.getBlocks();
assertTrue("File does not have blocks: " + inode.toString(),
blocks != null && blocks.length > 0);
int idx = 0;
BlockInfo curBlock;
// all blocks but the last two should be regular blocks
for(; idx < blocks.length - 2; idx++) {
curBlock = blocks[idx];
assertTrue("Block is not complete: " + curBlock,
curBlock.isComplete());
assertTrue("Block is not in BlocksMap: " + curBlock,
ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
}
// the penultimate block is either complete or
// committed if the file is not closed
if(idx > 0) {
curBlock = blocks[idx-1]; // penultimate block
assertTrue("Block " + curBlock +
" isUnderConstruction = " + inode.isUnderConstruction() +
" expected to be " + isFileOpen,
(isFileOpen && curBlock.isComplete()) ||
(!isFileOpen && !curBlock.isComplete() ==
(curBlock.getBlockUCState() ==
BlockUCState.COMMITTED)));
assertTrue("Block is not in BlocksMap: " + curBlock,
ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
}
// The last block is complete if the file is closed.
// If the file is open, the last block may be complete or not.
curBlock = blocks[idx]; // last block
if (!isFileOpen) {
assertTrue("Block " + curBlock + ", isFileOpen = " + isFileOpen,
curBlock.isComplete());
}
assertTrue("Block is not in BlocksMap: " + curBlock,
ns.getBlockManager().getStoredBlock(curBlock) == curBlock);
}
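/*
 * Editorial note: the checks above track the HDFS block state machine
 * during a write: earlier blocks are COMPLETE, the next-to-last block may
 * sit in COMMITTED until enough replicas are reported, and the last block
 * is typically UNDER_CONSTRUCTION until the file is closed.
 */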
@Test
public void testBlockCreation() throws IOException {
Path file1 = new Path(BASE_DIR, "file1.dat");
FSDataOutputStream out = TestFileCreation.createFile(hdfs, file1, 3);
for(int idx = 0; idx < NUM_BLOCKS; idx++) {
// write one block
writeFile(file1, out, BLOCK_SIZE);
// verify consistency
verifyFileBlocks(file1.toString(), true);
}
// close file
out.close();
// verify consistency
verifyFileBlocks(file1.toString(), false);
}
/**
* Test NameNode.getBlockLocations(..) on reading un-closed files.
*/
@Test
public void testGetBlockLocations() throws IOException {
final NamenodeProtocols namenode = cluster.getNameNodeRpc();
final Path p = new Path(BASE_DIR, "file2.dat");
final String src = p.toString();
final FSDataOutputStream out = TestFileCreation.createFile(hdfs, p, 3);
    // write the first block; len only tracks the byte range read back below
    // (writeFile above always writes a full block regardless of len)
    int len = BLOCK_SIZE >>> 1;
writeFile(p, out, len);
for(int i = 1; i < NUM_BLOCKS; ) {
// verify consistency
final LocatedBlocks lb = namenode.getBlockLocations(src, 0, len);
final List<LocatedBlock> blocks = lb.getLocatedBlocks();
assertEquals(i, blocks.size());
final Block b = blocks.get(blocks.size() - 1).getBlock().getLocalBlock();
assertTrue(b instanceof BlockInfoContiguousUnderConstruction);
if (++i < NUM_BLOCKS) {
// write one more block
writeFile(p, out, BLOCK_SIZE);
len += BLOCK_SIZE;
}
}
// close file
out.close();
}
}
| 6,988
| 36.778378
| 90
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.ExitUtil.ExitException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestEditLogJournalFailures {
private int editsPerformed = 0;
private MiniDFSCluster cluster;
private FileSystem fs;
  /**
   * Create the mini cluster for testing and disable exit-on-shutdown checks
   * so that edit log journal failures, which normally terminate the NameNode
   * via ExitUtil, don't actually cause the JVM to exit.
   */
@Before
public void setUpMiniCluster() throws IOException {
setUpMiniCluster(new HdfsConfiguration(), true);
}
public void setUpMiniCluster(Configuration conf, boolean manageNameDfsDirs)
throws IOException {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.manageNameDfsDirs(manageNameDfsDirs).checkExitOnShutdown(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
@After
public void shutDownMiniCluster() throws IOException {
if (fs != null)
fs.close();
if (cluster != null) {
try {
cluster.shutdown();
} catch (ExitException ee) {
// Ignore ExitExceptions as the tests may result in the
// NameNode doing an immediate shutdown.
}
}
}
@Test
public void testSingleFailedEditsDirOnFlush() throws IOException {
assertTrue(doAnEdit());
// Invalidate one edits journal.
invalidateEditsDirAtIndex(0, true, false);
// The NN has not terminated (no ExitException thrown)
assertTrue(doAnEdit());
// A single journal failure should not result in a call to terminate
assertFalse(cluster.getNameNode().isInSafeMode());
}
@Test
public void testAllEditsDirsFailOnFlush() throws IOException {
assertTrue(doAnEdit());
// Invalidate both edits journals.
invalidateEditsDirAtIndex(0, true, false);
invalidateEditsDirAtIndex(1, true, false);
    // Up to this point the NN has not terminated (no ExitException thrown)
try {
doAnEdit();
fail("The previous edit could not be synced to any persistent storage, "
+ "should have halted the NN");
} catch (RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains(
"Could not sync enough journals to persistent storage. " +
"Unsynced transactions: 1", re);
}
}
@Test
public void testAllEditsDirFailOnWrite() throws IOException {
assertTrue(doAnEdit());
// Invalidate both edits journals.
invalidateEditsDirAtIndex(0, true, true);
invalidateEditsDirAtIndex(1, true, true);
    // Up to this point the NN has not terminated (no ExitException thrown)
try {
doAnEdit();
fail("The previous edit could not be synced to any persistent storage, "
+ " should have halted the NN");
} catch (RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains(
"Could not sync enough journals to persistent storage due to " +
"No journals available to flush. " +
"Unsynced transactions: 1", re);
}
}
@Test
public void testSingleFailedEditsDirOnSetReadyToFlush() throws IOException {
assertTrue(doAnEdit());
// Invalidate one edits journal.
invalidateEditsDirAtIndex(0, false, false);
// The NN has not terminated (no ExitException thrown)
assertTrue(doAnEdit());
// A single journal failure should not result in a call to terminate
assertFalse(cluster.getNameNode().isInSafeMode());
}
@Test
public void testSingleRequiredFailedEditsDirOnSetReadyToFlush()
throws IOException {
// Set one of the edits dirs to be required.
String[] editsDirs = cluster.getConfiguration(0).getTrimmedStrings(
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
shutDownMiniCluster();
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY, editsDirs[0]);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 0);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 0);
setUpMiniCluster(conf, true);
assertTrue(doAnEdit());
    // Invalidate the one required edits journal.
invalidateEditsDirAtIndex(0, false, false);
JournalAndStream nonRequiredJas = getJournalAndStream(1);
EditLogFileOutputStream nonRequiredSpy =
spyOnStream(nonRequiredJas);
    // The NN has not terminated (no ExitException thrown),
    // and the other stream is still active.
assertTrue(nonRequiredJas.isActive());
try {
doAnEdit();
fail("A single failure of a required journal should have halted the NN");
} catch (RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains(
"setReadyToFlush failed for required journal", re);
}
// Since the required directory failed setReadyToFlush, and that
// directory was listed prior to the non-required directory,
// we should not call setReadyToFlush on the non-required
// directory. Regression test for HDFS-2874.
Mockito.verify(nonRequiredSpy, Mockito.never()).setReadyToFlush();
assertFalse(nonRequiredJas.isActive());
}
@Test
public void testMultipleRedundantFailedEditsDirOnSetReadyToFlush()
throws IOException {
// Set up 4 name/edits dirs.
shutDownMiniCluster();
Configuration conf = new HdfsConfiguration();
String[] nameDirs = new String[4];
for (int i = 0; i < nameDirs.length; i++) {
File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i);
nameDir.mkdirs();
nameDirs[i] = nameDir.getAbsolutePath();
}
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
StringUtils.join(nameDirs, ","));
    // Keep running unless fewer than 2 edits dirs remain.
conf.setInt(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY, 2);
setUpMiniCluster(conf, false);
// All journals active.
assertTrue(doAnEdit());
// The NN has not terminated (no ExitException thrown)
// Invalidate 1/4 of the redundant journals.
invalidateEditsDirAtIndex(0, false, false);
assertTrue(doAnEdit());
// The NN has not terminated (no ExitException thrown)
// Invalidate 2/4 of the redundant journals.
invalidateEditsDirAtIndex(1, false, false);
assertTrue(doAnEdit());
// The NN has not terminated (no ExitException thrown)
// Invalidate 3/4 of the redundant journals.
invalidateEditsDirAtIndex(2, false, false);
try {
doAnEdit();
fail("A failure of more than the minimum number of redundant journals "
+ "should have halted ");
} catch (RemoteException re) {
assertTrue(re.getClassName().contains("ExitException"));
GenericTestUtils.assertExceptionContains(
"Could not sync enough journals to persistent storage due to " +
"setReadyToFlush failed for too many journals. " +
"Unsynced transactions: 1", re);
}
}
  /**
   * Replace the journal at index <code>index</code> with a spy that throws an
   * exception on write, flush, or setReadyToFlush, depending on the flags.
   *
   * @param index the index of the journal to take offline.
   * @param failOnFlush if true, fail on flush(); otherwise fail on
   *          setReadyToFlush().
   * @param failOnWrite if true, also fail on write().
   */
private void invalidateEditsDirAtIndex(int index,
boolean failOnFlush, boolean failOnWrite) throws IOException {
JournalAndStream jas = getJournalAndStream(index);
EditLogFileOutputStream spyElos = spyOnStream(jas);
if (failOnWrite) {
doThrow(new IOException("fail on write()")).when(spyElos).write(
(FSEditLogOp) any());
}
if (failOnFlush) {
doThrow(new IOException("fail on flush()")).when(spyElos).flush();
} else {
doThrow(new IOException("fail on setReadyToFlush()")).when(spyElos)
.setReadyToFlush();
}
}
private EditLogFileOutputStream spyOnStream(JournalAndStream jas) {
EditLogFileOutputStream elos =
(EditLogFileOutputStream) jas.getCurrentStream();
EditLogFileOutputStream spyElos = spy(elos);
jas.setCurrentStreamForTests(spyElos);
return spyElos;
}
/**
* Pull out one of the JournalAndStream objects from the edit log.
*/
private JournalAndStream getJournalAndStream(int index) {
FSImage fsimage = cluster.getNamesystem().getFSImage();
FSEditLog editLog = fsimage.getEditLog();
return editLog.getJournals().get(index);
}
/**
* Do a mutative metadata operation on the file system.
*
* @return true if the operation was successful, false otherwise.
*/
private boolean doAnEdit() throws IOException {
return fs.mkdirs(new Path("/tmp", Integer.toString(editsPerformed++)));
}
}
| 10,497
| 35.578397
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystem.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Collection;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
import org.apache.hadoop.hdfs.server.namenode.ha.HAState;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.After;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
public class TestFSNamesystem {
@After
public void cleanUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}
  /**
   * Tests that the namenode edits dirs are retrieved with duplicates removed.
   */
@Test
public void testUniqueEditDirs() throws IOException {
Configuration config = new Configuration();
config.set(DFS_NAMENODE_EDITS_DIR_KEY, "file://edits/dir, "
+ "file://edits/dir1,file://edits/dir1"); // overlapping internally
// getNamespaceEditsDirs removes duplicates
Collection<URI> editsDirs = FSNamesystem.getNamespaceEditsDirs(config);
assertEquals(2, editsDirs.size());
}
/**
* Test that FSNamesystem#clear clears all leases.
*/
@Test
public void testFSNamespaceClearLeases() throws Exception {
Configuration conf = new HdfsConfiguration();
File nameDir = new File(MiniDFSCluster.getBaseDirectory(), "name");
conf.set(DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
DFSTestUtil.formatNameNode(conf);
FSNamesystem fsn = FSNamesystem.loadFromDisk(conf);
LeaseManager leaseMan = fsn.getLeaseManager();
leaseMan.addLease("client1", fsn.getFSDirectory().allocateNewInodeId());
assertEquals(1, leaseMan.countLease());
fsn.clear();
leaseMan = fsn.getLeaseManager();
assertEquals(0, leaseMan.countLease());
}
  /**
   * Test that isInStartupSafemode returns true only during startup safemode
   * and not also during low-resource safemode.
   */
  @Test
  public void testStartupSafemode() throws IOException {
Configuration conf = new Configuration();
FSImage fsImage = Mockito.mock(FSImage.class);
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
fsn.leaveSafeMode();
assertTrue("After leaving safemode FSNamesystem.isInStartupSafeMode still "
+ "returned true", !fsn.isInStartupSafeMode());
assertTrue("After leaving safemode FSNamesystem.isInSafeMode still returned"
+ " true", !fsn.isInSafeMode());
fsn.enterSafeMode(true);
assertTrue("After entering safemode due to low resources FSNamesystem."
+ "isInStartupSafeMode still returned true", !fsn.isInStartupSafeMode());
assertTrue("After entering safemode due to low resources FSNamesystem."
+ "isInSafeMode still returned false", fsn.isInSafeMode());
}
@Test
public void testReplQueuesActiveAfterStartupSafemode() throws IOException, InterruptedException{
Configuration conf = new Configuration();
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
FSImage fsImage = Mockito.mock(FSImage.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
FSNamesystem fsn = Mockito.spy(fsNamesystem);
    // Make shouldPopulateReplQueues return true.
HAContext haContext = Mockito.mock(HAContext.class);
HAState haState = Mockito.mock(HAState.class);
Mockito.when(haContext.getState()).thenReturn(haState);
Mockito.when(haState.shouldPopulateReplQueues()).thenReturn(true);
Whitebox.setInternalState(fsn, "haContext", haContext);
    // Make NameNode.getNameNodeMetrics() not return null.
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
assertTrue("Replication queues were being populated during very first "
+ "safemode", !fsn.isPopulatingReplQueues());
fsn.leaveSafeMode();
assertTrue("FSNamesystem didn't leave safemode", !fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated even after leaving "
+ "safemode", fsn.isPopulatingReplQueues());
fsn.enterSafeMode(false);
assertTrue("FSNamesystem didn't enter safemode", fsn.isInSafeMode());
assertTrue("Replication queues weren't being populated after entering "
+ "safemode 2nd time", fsn.isPopulatingReplQueues());
}
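  /**
   * Hedged aside (not used by the tests above): Whitebox.setInternalState is
   * a Mockito-internal utility; a dependency-free equivalent using plain
   * reflection might look like this sketch. It walks up the class hierarchy
   * because a Mockito spy is a generated subclass of the target.
   */
  private static void setPrivateField(Object target, String fieldName,
      Object value) throws Exception {
    Class<?> c = target.getClass();
    while (c != null) {
      try {
        java.lang.reflect.Field f = c.getDeclaredField(fieldName);
        f.setAccessible(true); // bypass private access, for tests only
        f.set(target, value);
        return;
      } catch (NoSuchFieldException e) {
        c = c.getSuperclass(); // the field may be declared in a superclass
      }
    }
    throw new NoSuchFieldException(fieldName);
  }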
@Test
public void testFsLockFairness() throws IOException, InterruptedException{
Configuration conf = new Configuration();
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
FSImage fsImage = Mockito.mock(FSImage.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
conf.setBoolean("dfs.namenode.fslock.fair", true);
FSNamesystem fsNamesystem = new FSNamesystem(conf, fsImage);
assertTrue(fsNamesystem.getFsLockForTests().isFair());
conf.setBoolean("dfs.namenode.fslock.fair", false);
fsNamesystem = new FSNamesystem(conf, fsImage);
assertFalse(fsNamesystem.getFsLockForTests().isFair());
}
@Test
public void testFSNamesystemLockCompatibility() {
FSNamesystemLock rwLock = new FSNamesystemLock(true);
assertEquals(0, rwLock.getReadHoldCount());
rwLock.readLock().lock();
assertEquals(1, rwLock.getReadHoldCount());
rwLock.readLock().lock();
assertEquals(2, rwLock.getReadHoldCount());
rwLock.readLock().unlock();
assertEquals(1, rwLock.getReadHoldCount());
rwLock.readLock().unlock();
assertEquals(0, rwLock.getReadHoldCount());
assertFalse(rwLock.isWriteLockedByCurrentThread());
assertEquals(0, rwLock.getWriteHoldCount());
rwLock.writeLock().lock();
assertTrue(rwLock.isWriteLockedByCurrentThread());
assertEquals(1, rwLock.getWriteHoldCount());
rwLock.writeLock().lock();
assertTrue(rwLock.isWriteLockedByCurrentThread());
assertEquals(2, rwLock.getWriteHoldCount());
rwLock.writeLock().unlock();
assertTrue(rwLock.isWriteLockedByCurrentThread());
assertEquals(1, rwLock.getWriteHoldCount());
rwLock.writeLock().unlock();
assertFalse(rwLock.isWriteLockedByCurrentThread());
assertEquals(0, rwLock.getWriteHoldCount());
}
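  /**
   * Hedged aside: the hold-count semantics asserted above match
   * java.util.concurrent.locks.ReentrantReadWriteLock, which FSNamesystemLock
   * is presumed to wrap or extend; the same behavior can be demonstrated with
   * the JDK class directly.
   */
  @Test
  public void testJdkReadWriteLockHoldCounts() {
    java.util.concurrent.locks.ReentrantReadWriteLock lock =
        new java.util.concurrent.locks.ReentrantReadWriteLock(true);
    lock.readLock().lock();
    lock.readLock().lock(); // read locks are reentrant
    assertEquals(2, lock.getReadHoldCount());
    lock.readLock().unlock();
    lock.readLock().unlock();
    assertEquals(0, lock.getReadHoldCount());
    lock.writeLock().lock();
    lock.writeLock().lock(); // so is the write lock
    assertTrue(lock.isWriteLockedByCurrentThread());
    assertEquals(2, lock.getWriteHoldCount());
    lock.writeLock().unlock();
    lock.writeLock().unlock();
    assertFalse(lock.isWriteLockedByCurrentThread());
  }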
@Test
public void testReset() throws Exception {
Configuration conf = new Configuration();
FSEditLog fsEditLog = Mockito.mock(FSEditLog.class);
FSImage fsImage = Mockito.mock(FSImage.class);
Mockito.when(fsImage.getEditLog()).thenReturn(fsEditLog);
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
fsn.imageLoadComplete();
assertTrue(fsn.isImageLoaded());
fsn.clear();
assertFalse(fsn.isImageLoaded());
final INodeDirectory root = (INodeDirectory) fsn.getFSDirectory()
.getINode("/");
assertTrue(root.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
fsn.imageLoadComplete();
assertTrue(fsn.isImageLoaded());
}
@Test
public void testGetEffectiveLayoutVersion() {
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(true, -60, -61, -63));
assertEquals(-61,
FSNamesystem.getEffectiveLayoutVersion(true, -61, -61, -63));
assertEquals(-62,
FSNamesystem.getEffectiveLayoutVersion(true, -62, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(true, -63, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -60, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -61, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -62, -61, -63));
assertEquals(-63,
FSNamesystem.getEffectiveLayoutVersion(false, -63, -61, -63));
}
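  /**
   * Hedged sketch of the decision rule the assertions above imply; this is an
   * illustration, not the production implementation. Layout versions are
   * negative and decrease as features are added, so storageLV <= minCompatLV
   * means the on-disk layout is at least as new as the minimum compatible
   * version.
   */
  private static int effectiveLayoutVersionSketch(boolean isRollingUpgrade,
      int storageLV, int minCompatLV, int currentLV) {
    // During a rolling upgrade keep reporting the on-disk layout version,
    // so a downgrade stays possible; otherwise adopt the software's version.
    return (isRollingUpgrade && storageLV <= minCompatLV)
        ? storageLV : currentLV;
  }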
}
| 9,178
| 37.729958
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSecurityTokenEditLog.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import static org.junit.Assert.assertEquals;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.util.Iterator;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.junit.Test;
import static org.mockito.Mockito.*;
/**
 * This class tests that delegation token operations are correctly written
 * to, and replayed from, the edit log.
 */
public class TestSecurityTokenEditLog {
static final int NUM_DATA_NODES = 1;
  // This test creates NUM_THREADS threads and each thread performs
  // NUM_TRANSACTIONS transactions (opsPerTrans edit ops each) concurrently.
static final int NUM_TRANSACTIONS = 100;
static final int NUM_THREADS = 100;
static final int opsPerTrans = 3;
static {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
//
// an object that does a bunch of transactions
//
static class Transactions implements Runnable {
final FSNamesystem namesystem;
final int numTransactions;
short replication = 3;
long blockSize = 64;
Transactions(FSNamesystem ns, int num) {
namesystem = ns;
numTransactions = num;
}
// add a bunch of transactions.
@Override
public void run() {
FSEditLog editLog = namesystem.getEditLog();
for (int i = 0; i < numTransactions; i++) {
try {
String renewer = UserGroupInformation.getLoginUser().getUserName();
Token<DelegationTokenIdentifier> token = namesystem
.getDelegationToken(new Text(renewer));
namesystem.renewDelegationToken(token);
namesystem.cancelDelegationToken(token);
editLog.logSync();
} catch (IOException e) {
System.out.println("Transaction " + i + " encountered exception " +
e);
}
}
}
}
/**
* Tests transaction logging in dfs.
*/
@Test
public void testEditLog() throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
try {
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
for (Iterator<URI> it = cluster.getNameDirs(0).iterator(); it.hasNext(); ) {
File dir = new File(it.next().getPath());
System.out.println(dir);
}
FSImage fsimage = namesystem.getFSImage();
FSEditLog editLog = fsimage.getEditLog();
// set small size of flush buffer
editLog.setOutputBufferCapacity(2048);
// Create threads and make them run transactions concurrently.
Thread threadId[] = new Thread[NUM_THREADS];
for (int i = 0; i < NUM_THREADS; i++) {
Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
threadId[i] = new Thread(trans, "TransactionThread-" + i);
threadId[i].start();
}
// wait for all transactions to get over
for (int i = 0; i < NUM_THREADS; i++) {
try {
threadId[i].join();
} catch (InterruptedException e) {
i--; // retry
}
}
editLog.close();
// Verify that we can read in all the transactions that we have written.
// If there were any corruptions, it is likely that the reading in
// of these transactions will throw an exception.
//
namesystem.getDelegationTokenSecretManager().stopThreads();
int numKeys = namesystem.getDelegationTokenSecretManager().getNumberOfKeys();
int expectedTransactions = NUM_THREADS * opsPerTrans * NUM_TRANSACTIONS + numKeys
+ 2; // + 2 for BEGIN and END txns
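      // Worked example with the constants above: 100 threads x 100
      // transactions x 3 ops = 30,000 edits, plus numKeys edits (one per
      // delegation master key generated), plus the BEGIN and END markers.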
for (StorageDirectory sd : fsimage.getStorage().dirIterable(NameNodeDirType.EDITS)) {
File editFile = NNStorage.getFinalizedEditsFile(sd, 1, 1 + expectedTransactions - 1);
System.out.println("Verifying file: " + editFile);
FSEditLogLoader loader = new FSEditLogLoader(namesystem, 0);
long numEdits = loader.loadFSEdits(
new EditLogFileInputStream(editFile), 1);
assertEquals("Verification for " + editFile, expectedTransactions, numEdits);
}
} finally {
if(fileSys != null) fileSys.close();
if(cluster != null) cluster.shutdown();
}
}
@Test(timeout=10000)
public void testEditsForCancelOnTokenExpire() throws IOException,
InterruptedException {
long renewInterval = 2000;
Configuration conf = new Configuration();
conf.setBoolean(
DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, renewInterval);
conf.setLong(DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY, renewInterval*2);
Text renewer = new Text(UserGroupInformation.getCurrentUser().getUserName());
FSImage fsImage = mock(FSImage.class);
FSEditLog log = mock(FSEditLog.class);
doReturn(log).when(fsImage).getEditLog();
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
DelegationTokenSecretManager dtsm = fsn.getDelegationTokenSecretManager();
try {
dtsm.startThreads();
// get two tokens
Token<DelegationTokenIdentifier> token1 = fsn.getDelegationToken(renewer);
Token<DelegationTokenIdentifier> token2 = fsn.getDelegationToken(renewer);
DelegationTokenIdentifier ident1 =
token1.decodeIdentifier();
DelegationTokenIdentifier ident2 =
token2.decodeIdentifier();
// verify we got the tokens
verify(log, times(1)).logGetDelegationToken(eq(ident1), anyLong());
verify(log, times(1)).logGetDelegationToken(eq(ident2), anyLong());
      // This is a little tricky: DTSM doesn't let us set the scan interval,
      // so we periodically sleep, then stop/start its threads to force a scan.
// renew first token 1/2 to expire
Thread.sleep(renewInterval/2);
fsn.renewDelegationToken(token2);
verify(log, times(1)).logRenewDelegationToken(eq(ident2), anyLong());
// force scan and give it a little time to complete
dtsm.stopThreads(); dtsm.startThreads();
Thread.sleep(250);
// no token has expired yet
verify(log, times(0)).logCancelDelegationToken(eq(ident1));
verify(log, times(0)).logCancelDelegationToken(eq(ident2));
// sleep past expiration of 1st non-renewed token
Thread.sleep(renewInterval/2);
dtsm.stopThreads(); dtsm.startThreads();
Thread.sleep(250);
// non-renewed token should have implicitly been cancelled
verify(log, times(1)).logCancelDelegationToken(eq(ident1));
verify(log, times(0)).logCancelDelegationToken(eq(ident2));
// sleep past expiration of 2nd renewed token
Thread.sleep(renewInterval/2);
dtsm.stopThreads(); dtsm.startThreads();
Thread.sleep(250);
// both tokens should have been implicitly cancelled by now
verify(log, times(1)).logCancelDelegationToken(eq(ident1));
verify(log, times(1)).logCancelDelegationToken(eq(ident2));
} finally {
dtsm.stopThreads();
}
}
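  /**
   * Hedged helper sketch (not used above): the repeated stop/start-threads
   * dance that forces an expired-token scan could be factored out like this.
   */
  private static void forceTokenScan(DelegationTokenSecretManager dtsm)
      throws IOException, InterruptedException {
    dtsm.stopThreads();  // tear down the background expiry thread
    dtsm.startThreads(); // restarting triggers a fresh scan of stored tokens
    Thread.sleep(250);   // give the scan a moment to complete
  }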
}
| 8,926
| 36.826271
| 93
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSXAttrBaseTest.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
import static org.apache.hadoop.fs.permission.AclEntryType.USER;
import static org.apache.hadoop.fs.permission.FsAction.ALL;
import static org.apache.hadoop.fs.permission.FsAction.READ;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
/**
 * Tests NameNode interaction for all XAttr APIs.
 * This test suite covers restarting the NN and saving a new checkpoint.
 */
public class FSXAttrBaseTest {
protected static MiniDFSCluster dfsCluster;
protected static Configuration conf;
private static int pathCount = 0;
protected static Path path;
protected static Path filePath;
protected static Path rawPath;
protected static Path rawFilePath;
// XAttrs
protected static final String name1 = "user.a1";
protected static final byte[] value1 = {0x31, 0x32, 0x33};
protected static final byte[] newValue1 = {0x31, 0x31, 0x31};
protected static final String name2 = "user.a2";
protected static final byte[] value2 = {0x37, 0x38, 0x39};
protected static final String name3 = "user.a3";
protected static final String name4 = "user.a4";
protected static final String raw1 = "raw.a1";
protected static final String raw2 = "raw.a2";
protected static final String security1 =
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER;
private static final int MAX_SIZE = security1.length();
protected FileSystem fs;
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { });
@BeforeClass
public static void init() throws Exception {
conf = new HdfsConfiguration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTRS_PER_INODE_KEY, 3);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_XATTR_SIZE_KEY, MAX_SIZE);
initCluster(true);
}
@AfterClass
public static void shutdown() {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
}
@Before
public void setUp() throws Exception {
pathCount += 1;
path = new Path("/p" + pathCount);
filePath = new Path(path, "file");
rawPath = new Path("/.reserved/raw/p" + pathCount);
rawFilePath = new Path(rawPath, "file");
initFileSystem();
}
@After
public void destroyFileSystems() {
IOUtils.cleanup(null, fs);
fs = null;
}
/**
* Tests for creating xattr
* 1. Create an xattr using XAttrSetFlag.CREATE.
* 2. Create an xattr which already exists and expect an exception.
* 3. Create multiple xattrs.
* 4. Restart NN and save checkpoint scenarios.
*/
@Test(timeout = 120000)
public void testCreateXAttr() throws Exception {
Map<String, byte[]> expectedXAttrs = Maps.newHashMap();
expectedXAttrs.put(name1, value1);
expectedXAttrs.put(name2, null);
expectedXAttrs.put(security1, null);
doTestCreateXAttr(filePath, expectedXAttrs);
expectedXAttrs.put(raw1, value1);
doTestCreateXAttr(rawFilePath, expectedXAttrs);
}
private void doTestCreateXAttr(Path usePath, Map<String,
byte[]> expectedXAttrs) throws Exception {
DFSTestUtil.createFile(fs, usePath, 8192, (short) 1, 0xFEED);
fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
Map<String, byte[]> xattrs = fs.getXAttrs(usePath);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(value1, xattrs.get(name1));
fs.removeXAttr(usePath, name1);
xattrs = fs.getXAttrs(usePath);
Assert.assertEquals(xattrs.size(), 0);
// Create xattr which already exists.
fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
try {
fs.setXAttr(usePath, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
Assert.fail("Creating xattr which already exists should fail.");
} catch (IOException e) {
}
fs.removeXAttr(usePath, name1);
// Create the xattrs
for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
fs.setXAttr(usePath, ent.getKey(), ent.getValue(),
EnumSet.of(XAttrSetFlag.CREATE));
}
xattrs = fs.getXAttrs(usePath);
Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
final byte[] val =
(ent.getValue() == null) ? new byte[0] : ent.getValue();
Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
}
restart(false);
initFileSystem();
xattrs = fs.getXAttrs(usePath);
Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
final byte[] val =
(ent.getValue() == null) ? new byte[0] : ent.getValue();
Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
}
restart(true);
initFileSystem();
xattrs = fs.getXAttrs(usePath);
Assert.assertEquals(xattrs.size(), expectedXAttrs.size());
for (Map.Entry<String, byte[]> ent : expectedXAttrs.entrySet()) {
final byte[] val =
(ent.getValue() == null) ? new byte[0] : ent.getValue();
Assert.assertArrayEquals(val, xattrs.get(ent.getKey()));
}
fs.delete(usePath, false);
}
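  /**
   * Hedged refactoring sketch (not used by the tests above): the three
   * verify-after-restart passes in doTestCreateXAttr repeat the same
   * assertions; a helper like this could fold them into one call site.
   */
  private void assertXAttrsMatch(Path p, Map<String, byte[]> expected)
      throws IOException {
    Map<String, byte[]> actual = fs.getXAttrs(p);
    Assert.assertEquals(expected.size(), actual.size());
    for (Map.Entry<String, byte[]> ent : expected.entrySet()) {
      // a null expected value is stored and returned as an empty byte array
      final byte[] val =
          (ent.getValue() == null) ? new byte[0] : ent.getValue();
      Assert.assertArrayEquals(val, actual.get(ent.getKey()));
    }
  }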
/**
* Tests for replacing xattr
* 1. Replace an xattr using XAttrSetFlag.REPLACE.
* 2. Replace an xattr which doesn't exist and expect an exception.
* 3. Create multiple xattrs and replace some.
* 4. Restart NN and save checkpoint scenarios.
*/
@Test(timeout = 120000)
public void testReplaceXAttr() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
Map<String, byte[]> xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
fs.removeXAttr(path, name1);
// Replace xattr which does not exist.
try {
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.REPLACE));
Assert.fail("Replacing xattr which does not exist should fail.");
} catch (IOException e) {
}
// Create two xattrs, then replace one
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, null, EnumSet.of(XAttrSetFlag.REPLACE));
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
restart(false);
initFileSystem();
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
restart(true);
initFileSystem();
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(new byte[0], xattrs.get(name2));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
}
/**
* Tests for setting xattr
* 1. Set xattr with XAttrSetFlag.CREATE|XAttrSetFlag.REPLACE flag.
* 2. Set xattr with illegal name.
* 3. Set xattr without XAttrSetFlag.
* 4. Set xattr and total number exceeds max limit.
* 5. Set xattr and name is too long.
* 6. Set xattr and value is too long.
*/
@Test(timeout = 120000)
public void testSetXAttr() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
Map<String, byte[]> xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(value1, xattrs.get(name1));
fs.removeXAttr(path, name1);
// Set xattr with null name
try {
fs.setXAttr(path, null, value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with null name should fail.");
} catch (NullPointerException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
} catch (RemoteException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
}
// Set xattr with empty name: "user."
try {
fs.setXAttr(path, "user.", value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with empty name should fail.");
} catch (RemoteException e) {
assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
HadoopIllegalArgumentException.class.getCanonicalName());
GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be empty", e);
}
// Set xattr with invalid name: "a1"
try {
fs.setXAttr(path, "a1", value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
Assert.fail("Setting xattr with invalid name prefix or without " +
"name prefix should fail.");
} catch (RemoteException e) {
assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
HadoopIllegalArgumentException.class.getCanonicalName());
GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("XAttr name must be prefixed", e);
}
// Set xattr without XAttrSetFlag
fs.setXAttr(path, name1, value1);
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(value1, xattrs.get(name1));
fs.removeXAttr(path, name1);
// XAttr exists, and replace it using CREATE|REPLACE flag.
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
fs.removeXAttr(path, name1);
// Total number exceeds max limit
fs.setXAttr(path, name1, value1);
fs.setXAttr(path, name2, value2);
fs.setXAttr(path, name3, null);
try {
fs.setXAttr(path, name4, null);
Assert.fail("Setting xattr should fail if total number of xattrs " +
"for inode exceeds max limit.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Cannot add additional XAttr", e);
}
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
fs.removeXAttr(path, name3);
// Name length exceeds max limit
String longName = "user.0123456789abcdefX0123456789abcdefX0123456789abcdef";
try {
fs.setXAttr(path, longName, null);
Assert.fail("Setting xattr should fail if name is too long.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("XAttr is too big", e);
GenericTestUtils.assertExceptionContains("total size is 50", e);
}
// Value length exceeds max limit
byte[] longValue = new byte[MAX_SIZE];
try {
fs.setXAttr(path, "user.a", longValue);
Assert.fail("Setting xattr should fail if value is too long.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("XAttr is too big", e);
GenericTestUtils.assertExceptionContains("total size is 38", e);
}
// Name + value exactly equal the limit
String name = "user.111";
byte[] value = new byte[MAX_SIZE-3];
fs.setXAttr(path, name, value);
}
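  /*
   * Note on the size arithmetic above, derived from this test's own setup:
   * the per-xattr limit is MAX_SIZE = security1.length(), and the final
   * passing setXAttr suggests the limit counts the name without its
   * namespace prefix plus the value ("user.111" contributes 3 characters
   * and the value MAX_SIZE - 3 bytes, landing exactly on the limit).
   */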
/**
* getxattr tests. Test that getxattr throws an exception if any of
* the following are true:
* an xattr that was requested doesn't exist
* the caller specifies an unknown namespace
* the caller doesn't have access to the namespace
* the caller doesn't have permission to get the value of the xattr
* the caller does not have search access to the parent directory
* the caller has only read access to the owning directory
* the caller has only search access to the owning directory and
* execute/search access to the actual entity
* the caller does not have search access to the owning directory and read
* access to the actual entity
*/
@Test(timeout = 120000)
public void testGetXAttrs() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
final byte[] theValue = fs.getXAttr(path, "USER.a2");
Assert.assertArrayEquals(value2, theValue);
/* An XAttr that was requested does not exist. */
try {
final byte[] value = fs.getXAttr(path, name3);
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"At least one of the attributes provided was not found.", e);
}
/* Throw an exception if an xattr that was requested does not exist. */
{
final List<String> names = Lists.newArrayList();
names.add(name1);
names.add(name2);
names.add(name3);
try {
final Map<String, byte[]> xattrs = fs.getXAttrs(path, names);
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"At least one of the attributes provided was not found.", e);
}
}
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
/* Unknown namespace should throw an exception. */
try {
final byte[] xattr = fs.getXAttr(path, "wackynamespace.foo");
Assert.fail("expected IOException");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains
("An XAttr name must be prefixed with " +
"user/trusted/security/system/raw, " +
"followed by a '.'",
e);
}
/*
* The 'trusted' namespace should not be accessible and should throw an
* exception.
*/
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
fs.setXAttr(path, "trusted.foo", "1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
final byte[] xattr = userFs.getXAttr(path, "trusted.foo");
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
}
fs.setXAttr(path, name1, "1234".getBytes());
/*
* Test that an exception is thrown if the caller doesn't have permission to
* get the value of the xattr.
*/
/* Set access so that only the owner has access. */
fs.setPermission(path, new FsPermission((short) 0700));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
final byte[] xattr = userFs.getXAttr(path, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* The caller must have search access to the parent directory.
*/
final Path childDir = new Path(path, "child" + pathCount);
/* Set access to parent so that only the owner has access. */
FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
fs.setXAttr(childDir, name1, "1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
final byte[] xattr = userFs.getXAttr(childDir, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/* Check that read access to the owning directory is not good enough. */
fs.setPermission(path, new FsPermission((short) 0704));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
final byte[] xattr = userFs.getXAttr(childDir, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* Check that search access to the owning directory and search/execute
* access to the actual entity with extended attributes is not good enough.
*/
fs.setPermission(path, new FsPermission((short) 0701));
fs.setPermission(childDir, new FsPermission((short) 0701));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
final byte[] xattr = userFs.getXAttr(childDir, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* Check that search access to the owning directory and read access to
* the actual entity with the extended attribute is good enough.
*/
fs.setPermission(path, new FsPermission((short) 0701));
fs.setPermission(childDir, new FsPermission((short) 0704));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
final byte[] xattr = userFs.getXAttr(childDir, name1);
return null;
}
});
}
/**
* Tests for removing xattr
* 1. Remove xattr.
* 2. Restart NN and save checkpoint scenarios.
*/
@Test(timeout = 120000)
public void testRemoveXAttr() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
Map<String, byte[]> xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
restart(false);
initFileSystem();
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
restart(true);
initFileSystem();
xattrs = fs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 1);
Assert.assertArrayEquals(new byte[0], xattrs.get(name3));
fs.removeXAttr(path, name3);
}
/**
* removexattr tests. Test that removexattr throws an exception if any of
* the following are true:
* an xattr that was requested doesn't exist
* the caller specifies an unknown namespace
* the caller doesn't have access to the namespace
* the caller doesn't have permission to get the value of the xattr
* the caller does not have "execute" (scan) access to the parent directory
* the caller has only read access to the owning directory
* the caller has only execute access to the owning directory and execute
* access to the actual entity
* the caller does not have execute access to the owning directory and write
* access to the actual entity
*/
@Test(timeout = 120000)
public void testRemoveXAttrPermissions() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
    try {
      fs.removeXAttr(path, name2);
      // removing the same xattr a second time should fail
      fs.removeXAttr(path, name2);
      Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("No matching attributes found", e);
}
/* Unknown namespace should throw an exception. */
final String expectedExceptionString = "An XAttr name must be prefixed " +
"with user/trusted/security/system/raw, followed by a '.'";
try {
fs.removeXAttr(path, "wackynamespace.foo");
Assert.fail("expected IOException");
} catch (RemoteException e) {
assertEquals("Unexpected RemoteException: " + e, e.getClassName(),
HadoopIllegalArgumentException.class.getCanonicalName());
GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
} catch (HadoopIllegalArgumentException e) {
GenericTestUtils.assertExceptionContains(expectedExceptionString, e);
}
/*
* The 'trusted' namespace should not be accessible and should throw an
* exception.
*/
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
fs.setXAttr(path, "trusted.foo", "1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.removeXAttr(path, "trusted.foo");
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("User doesn't have permission", e);
} finally {
fs.removeXAttr(path, "trusted.foo");
}
/*
* Test that an exception is thrown if the caller doesn't have permission to
* get the value of the xattr.
*/
/* Set access so that only the owner has access. */
fs.setPermission(path, new FsPermission((short) 0700));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.removeXAttr(path, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* The caller must have "execute" (scan) access to the parent directory.
*/
final Path childDir = new Path(path, "child" + pathCount);
/* Set access to parent so that only the owner has access. */
FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short)0700));
fs.setXAttr(childDir, name1, "1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.removeXAttr(childDir, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/* Check that read access to the owning directory is not good enough. */
fs.setPermission(path, new FsPermission((short) 0704));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.removeXAttr(childDir, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* Check that execute access to the owning directory and scan access to
* the actual entity with extended attributes is not good enough.
*/
fs.setPermission(path, new FsPermission((short) 0701));
fs.setPermission(childDir, new FsPermission((short) 0701));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.removeXAttr(childDir, name1);
return null;
}
});
Assert.fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* Check that execute access to the owning directory and write access to
* the actual entity with extended attributes is good enough.
*/
fs.setPermission(path, new FsPermission((short) 0701));
fs.setPermission(childDir, new FsPermission((short) 0706));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.removeXAttr(childDir, name1);
return null;
}
});
}
@Test(timeout = 120000)
public void testRenameFileWithXAttr() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
Path renamePath = new Path(path.toString() + "-rename");
fs.rename(path, renamePath);
Map<String, byte[]> xattrs = fs.getXAttrs(renamePath);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
fs.removeXAttr(renamePath, name1);
fs.removeXAttr(renamePath, name2);
}
/**
* Test the listXAttrs api.
* listXAttrs on a path that doesn't exist.
* listXAttrs on a path with no XAttrs
* Check basic functionality.
* Check that read access to parent dir is not enough to get xattr names
* Check that write access to the parent dir is not enough to get names
* Check that execute/scan access to the parent dir is sufficient to get
* xattr names.
*/
@Test(timeout = 120000)
public void testListXAttrs() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
/* listXAttrs in a path that doesn't exist. */
try {
fs.listXAttrs(path);
fail("expected FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("cannot find", e);
}
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
/* listXAttrs on a path with no XAttrs.*/
final List<String> noXAttrs = fs.listXAttrs(path);
assertTrue("XAttrs were found?", noXAttrs.size() == 0);
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
/** Check basic functionality. */
final List<String> xattrNames = fs.listXAttrs(path);
assertTrue(xattrNames.contains(name1));
assertTrue(xattrNames.contains(name2));
assertTrue(xattrNames.size() == 2);
/* Check that read access to parent dir is not enough to get xattr names. */
fs.setPermission(path, new FsPermission((short) 0704));
final Path childDir = new Path(path, "child" + pathCount);
FileSystem.mkdirs(fs, childDir, FsPermission.createImmutable((short) 0700));
fs.setXAttr(childDir, name1, "1234".getBytes());
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.listXAttrs(childDir);
return null;
}
});
fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* Check that write access to the parent dir is not enough to get names.
*/
fs.setPermission(path, new FsPermission((short) 0702));
try {
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.listXAttrs(childDir);
return null;
}
});
fail("expected IOException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Permission denied", e);
}
/*
* Check that execute/scan access to the parent dir is sufficient to get
* xattr names.
*/
fs.setPermission(path, new FsPermission((short) 0701));
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
userFs.listXAttrs(childDir);
return null;
}
});
/*
* Test that xattrs in the "trusted" namespace are filtered correctly.
*/
fs.setXAttr(childDir, "trusted.myxattr", "1234".getBytes());
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
assertTrue(userFs.listXAttrs(childDir).size() == 1);
return null;
}
});
assertTrue(fs.listXAttrs(childDir).size() == 2);
}
/**
* Steps:
* 1) Set xattrs on a file.
* 2) Remove xattrs from that file.
* 3) Save a checkpoint and restart NN.
* 4) Set xattrs again on the same file.
* 5) Remove xattrs from that file.
* 6) Restart NN without saving a checkpoint.
* 7) Set xattrs again on the same file.
*/
@Test(timeout = 120000)
public void testCleanupXAttrs() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
restart(true);
initFileSystem();
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
restart(false);
initFileSystem();
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
fs.removeXAttr(path, name1);
fs.removeXAttr(path, name2);
fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
Map<String, byte[]> xattrs = fs.getXAttrs(path);
Assert.assertEquals(2, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
}
@Test(timeout = 120000)
public void testXAttrAcl() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
fs.setOwner(path, BRUCE.getUserName(), null);
FileSystem fsAsBruce = createFileSystem(BRUCE);
FileSystem fsAsDiana = createFileSystem(DIANA);
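// Note: with no XAttrSetFlag argument, setXAttr defaults to
// CREATE-or-REPLACE semantics.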
fsAsBruce.setXAttr(path, name1, value1);
Map<String, byte[]> xattrs;
try {
xattrs = fsAsDiana.getXAttrs(path);
Assert.fail("Diana should not have read access to get xattrs");
} catch (AccessControlException e) {
// Ignore
}
// Give Diana read permissions to the path
fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
aclEntry(ACCESS, USER, DIANA.getUserName(), READ)));
xattrs = fsAsDiana.getXAttrs(path);
Assert.assertArrayEquals(value1, xattrs.get(name1));
try {
fsAsDiana.removeXAttr(path, name1);
Assert.fail("Diana should not have write access to remove xattrs");
} catch (AccessControlException e) {
// Ignore
}
try {
fsAsDiana.setXAttr(path, name2, value2);
Assert.fail("Diana should not have write access to set xattrs");
} catch (AccessControlException e) {
// Ignore
}
fsAsBruce.modifyAclEntries(path, Lists.newArrayList(
aclEntry(ACCESS, USER, DIANA.getUserName(), ALL)));
fsAsDiana.setXAttr(path, name2, value2);
Assert.assertArrayEquals(value2, fsAsDiana.getXAttrs(path).get(name2));
fsAsDiana.removeXAttr(path, name1);
fsAsDiana.removeXAttr(path, name2);
}
@Test(timeout = 120000)
public void testRawXAttrs() throws Exception {
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] {"mygroup"});
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0750));
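// rawPath names the same inode as path, reached through the
// "/.reserved/raw" prefix where raw.* xattrs are visible. Illustrative
// sketch of the construction:
//   Path raw = Path.mergePaths(new Path("/.reserved/raw"), somePath);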
fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
{
// getXAttr
final byte[] value = fs.getXAttr(rawPath, raw1);
Assert.assertArrayEquals(value1, value);
}
{
// getXAttrs
final Map<String, byte[]> xattrs = fs.getXAttrs(rawPath);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(raw1));
fs.removeXAttr(rawPath, raw1);
}
{
// replace and re-get
fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath, raw1, newValue1, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
final Map<String,byte[]> xattrs = fs.getXAttrs(rawPath);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(newValue1, xattrs.get(raw1));
fs.removeXAttr(rawPath, raw1);
}
{
// listXAttrs on rawPath ensuring raw.* xattrs are returned
fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
final List<String> xattrNames = fs.listXAttrs(rawPath);
assertTrue(xattrNames.contains(raw1));
assertTrue(xattrNames.contains(raw2));
assertTrue(xattrNames.size() == 2);
fs.removeXAttr(rawPath, raw1);
fs.removeXAttr(rawPath, raw2);
}
{
// listXAttrs on non-rawPath ensuring no raw.* xattrs returned
fs.setXAttr(rawPath, raw1, value1, EnumSet.of(XAttrSetFlag.CREATE));
fs.setXAttr(rawPath, raw2, value2, EnumSet.of(XAttrSetFlag.CREATE));
final List<String> xattrNames = fs.listXAttrs(path);
assertTrue(xattrNames.size() == 0);
fs.removeXAttr(rawPath, raw1);
fs.removeXAttr(rawPath, raw2);
}
{
/*
* Test non-root user operations in the "raw.*" namespace.
*/
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
// Test that non-root can not set xattrs in the "raw.*" namespace
try {
// non-raw path
userFs.setXAttr(path, raw1, value1);
fail("setXAttr should have thrown");
} catch (AccessControlException e) {
// ignore
}
try {
// raw path
userFs.setXAttr(rawPath, raw1, value1);
fail("setXAttr should have thrown");
} catch (AccessControlException e) {
// ignore
}
// Test that non-root can not do getXAttrs in the "raw.*" namespace
try {
// raw path
userFs.getXAttrs(rawPath);
fail("getXAttrs should have thrown");
} catch (AccessControlException e) {
// ignore
}
try {
// non-raw path
userFs.getXAttrs(path);
fail("getXAttrs should have thrown");
} catch (AccessControlException e) {
// ignore
}
// Test that non-root can not do getXAttr in the "raw.*" namespace
try {
// raw path
userFs.getXAttr(rawPath, raw1);
fail("getXAttr should have thrown");
} catch (AccessControlException e) {
// ignore
}
try {
// non-raw path
userFs.getXAttr(path, raw1);
fail("getXAttr should have thrown");
} catch (AccessControlException e) {
// ignore
}
return null;
}
});
}
{
/*
* Test that non-root can not do getXAttr in the "raw.*" namespace, and
* that raw.* xattrs set by the superuser stay hidden from non-root
* listXAttrs calls.
*/
fs.setXAttr(rawPath, raw1, value1);
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
try {
// raw path
userFs.getXAttr(rawPath, raw1);
fail("getXAttr should have thrown");
} catch (AccessControlException e) {
// ignore
}
try {
// non-raw path
userFs.getXAttr(path, raw1);
fail("getXAttr should have thrown");
} catch (AccessControlException e) {
// ignore
}
/*
* Test that only root can see raw.* xattrs returned from listXAttr
* and non-root can't do listXAttrs on /.reserved/raw.
*/
// non-raw path
final List<String> xattrNames = userFs.listXAttrs(path);
assertTrue(xattrNames.size() == 0);
try {
// raw path
userFs.listXAttrs(rawPath);
fail("listXAttrs on raw path should have thrown");
} catch (AccessControlException e) {
// ignore
}
return null;
}
});
fs.removeXAttr(rawPath, raw1);
}
}
/**
* This tests the "unreadable by superuser" xattr which denies access to a
* file for the superuser. See HDFS-6705 for details.
*/
@Test(timeout = 120000)
public void testUnreadableBySuperuserXAttr() throws Exception {
// Run tests as superuser...
doTestUnreadableBySuperuserXAttr(fs, true);
// ...and again as non-superuser
final UserGroupInformation user = UserGroupInformation.
createUserForTesting("user", new String[] { "mygroup" });
user.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final FileSystem userFs = dfsCluster.getFileSystem();
doTestUnreadableBySuperuserXAttr(userFs, false);
return null;
}
});
}
private void doTestUnreadableBySuperuserXAttr(FileSystem userFs,
boolean expectOpenFailure) throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short) 0777));
DFSTestUtil.createFile(userFs, filePath, 8192, (short) 1, 0xFEED);
try {
doTUBSXAInt(userFs, expectOpenFailure);
// Deleting the file is allowed.
userFs.delete(filePath, false);
} finally {
fs.delete(path, true);
}
}
private void doTUBSXAInt(FileSystem userFs, boolean expectOpenFailure)
throws Exception {
// Test that xattr can't be set on a dir
try {
userFs.setXAttr(path, security1, null, EnumSet.of(XAttrSetFlag.CREATE));
} catch (IOException e) {
// WebHDFS throws IOException instead of RemoteException
GenericTestUtils.assertExceptionContains("Can only set '" +
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' on a file", e);
}
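// For reference, the shell equivalent of the call below is roughly:
//   hdfs dfs -setfattr -n security.hdfs.unreadable.by.superuser /file
// (a sketch; SECURITY_XATTR_UNREADABLE_BY_SUPERUSER holds the exact name).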
// Test that xattr can actually be set. Repeatedly.
userFs.setXAttr(filePath, security1, null,
EnumSet.of(XAttrSetFlag.CREATE));
verifySecurityXAttrExists(userFs);
userFs.setXAttr(filePath, security1, null, EnumSet.of(XAttrSetFlag.CREATE,
XAttrSetFlag.REPLACE));
verifySecurityXAttrExists(userFs);
// Test that the xattr can't be deleted by anyone.
try {
userFs.removeXAttr(filePath, security1);
Assert.fail("Removing security xattr should fail.");
} catch (AccessControlException e) {
GenericTestUtils.assertExceptionContains("The xattr '" +
SECURITY_XATTR_UNREADABLE_BY_SUPERUSER + "' can not be deleted.", e);
}
// Test that xattr can be read.
verifySecurityXAttrExists(userFs);
// Test that a value can't be set for the xattr.
try {
userFs.setXAttr(filePath, security1,
value1, EnumSet.of(XAttrSetFlag.REPLACE));
fail("Should have thrown on attempt to set value");
} catch (AccessControlException e) {
GenericTestUtils.assertExceptionContains("Values are not allowed", e);
}
// Test that unreadable by superuser xattr appears in listXAttrs results
// (for superuser and non-superuser)
final List<String> xattrNames = userFs.listXAttrs(filePath);
assertTrue(xattrNames.contains(security1));
assertTrue(xattrNames.size() == 1);
verifyFileAccess(userFs, expectOpenFailure);
// Rename of the file is allowed by anyone.
Path toPath = new Path(filePath.toString() + "x");
userFs.rename(filePath, toPath);
userFs.rename(toPath, filePath);
}
private void verifySecurityXAttrExists(FileSystem userFs) throws Exception {
try {
final Map<String, byte[]> xattrs = userFs.getXAttrs(filePath);
Assert.assertEquals(1, xattrs.size());
Assert.assertNotNull(xattrs.get(security1));
Assert.assertArrayEquals("expected empty byte[] from getXAttr",
new byte[0], userFs.getXAttr(filePath, security1));
} catch (AccessControlException e) {
fail("getXAttrs failed but expected it to succeed");
}
}
private void verifyFileAccess(FileSystem userFs, boolean expectOpenFailure)
throws Exception {
// Test that a file with the xattr can or can't be opened.
try {
userFs.open(filePath);
assertFalse("open succeeded but expected it to fail", expectOpenFailure);
} catch (AccessControlException e) {
assertTrue("open failed but expected it to succeed", expectOpenFailure);
}
}
/**
* Creates a FileSystem for the super-user.
*
* @return FileSystem for super-user
* @throws Exception if creation fails
*/
protected FileSystem createFileSystem() throws Exception {
return dfsCluster.getFileSystem();
}
/**
* Creates a FileSystem for a specific user.
*
* @param user UserGroupInformation specific user
* @return FileSystem for specific user
* @throws Exception if creation fails
*/
protected FileSystem createFileSystem(UserGroupInformation user)
throws Exception {
return DFSTestUtil.getFileSystemAs(user, conf);
}
/**
* Initializes all FileSystem instances used in the tests.
*
* @throws Exception if initialization fails
*/
private void initFileSystem() throws Exception {
fs = createFileSystem();
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem
* instances for our test users.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @throws Exception if any step fails
*/
protected static void initCluster(boolean format) throws Exception {
dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
dfsCluster.waitActive();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @throws Exception if restart fails
*/
protected static void restart(boolean checkpoint) throws Exception {
NameNode nameNode = dfsCluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false);
}
}
| 47,369
| 35.326687
| 110
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.HashSet;
import java.util.Set;
import org.apache.commons.io.FileUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
import com.google.common.collect.Sets;
/**
* This tests data recovery mode for the NameNode.
*/
public class TestNameNodeRecovery {
private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
private static final StartupOption recoverStartOpt = StartupOption.RECOVER;
private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class);
static {
recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
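/**
* Shared harness for the scenarios below: writes a scenario's transactions
* to an edit log file, verifies that a normal read fails at the scenario's
* last valid txid (when corruption is expected), and then verifies that
* nextValidOp() can skip the damage and recover every valid txid.
*/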
static void runEditLogTest(EditLogTestSetup elts) throws IOException {
final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
final OpInstanceCache cache = new OpInstanceCache();
EditLogFileOutputStream elfos = null;
EditLogFileInputStream elfis = null;
try {
elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
elfos.create(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
elts.addTransactionsToLog(elfos, cache);
elfos.setReadyToFlush();
elfos.flushAndSync(true);
elfos.close();
elfos = null;
elfis = new EditLogFileInputStream(TEST_LOG_NAME);
elfis.setMaxOpSize(elts.getMaxOpSize());
// reading through normally will get you an exception
Set<Long> validTxIds = elts.getValidTxIds();
FSEditLogOp op = null;
long prevTxId = 0;
try {
while (true) {
op = elfis.nextOp();
if (op == null) {
break;
}
LOG.debug("read txid " + op.txid);
if (!validTxIds.contains(op.getTransactionId())) {
fail("read txid " + op.getTransactionId() +
", which we did not expect to find.");
}
validTxIds.remove(op.getTransactionId());
prevTxId = op.getTransactionId();
}
if (elts.getLastValidTxId() != -1) {
fail("failed to throw IoException as expected");
}
} catch (IOException e) {
if (elts.getLastValidTxId() == -1) {
fail("expected all transactions to be valid, but got exception " +
"on txid " + prevTxId);
} else {
assertEquals(prevTxId, elts.getLastValidTxId());
}
}
if (elts.getLastValidTxId() != -1) {
// let's skip over the bad transaction
op = null;
prevTxId = 0;
try {
while (true) {
op = elfis.nextValidOp();
if (op == null) {
break;
}
prevTxId = op.getTransactionId();
assertTrue(validTxIds.remove(op.getTransactionId()));
}
} catch (Throwable e) {
fail("caught IOException while trying to skip over bad " +
"transaction. message was " + e.getMessage() +
"\nstack trace\n" + StringUtils.stringifyException(e));
}
}
// We should have read every valid transaction.
assertTrue(validTxIds.isEmpty());
} finally {
IOUtils.cleanup(LOG, elfos, elfis);
}
}
/**
* A test scenario for the edit log
*/
private abstract static class EditLogTestSetup {
/**
* Set up the edit log.
*/
public abstract void addTransactionsToLog(EditLogOutputStream elos,
OpInstanceCache cache) throws IOException;
/**
* Get the transaction ID right before the transaction which causes the
* normal edit log loading process to bail out, or -1 if the first
* transaction should be bad.
*/
public abstract long getLastValidTxId();
/**
* Get the transaction IDs which should exist and be valid in this
* edit log.
*/
public abstract Set<Long> getValidTxIds();
/**
* Return the maximum opcode size we will use for input.
*/
public int getMaxOpSize() {
return DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
}
static void padEditLog(EditLogOutputStream elos, int paddingLength)
throws IOException {
if (paddingLength <= 0) {
return;
}
byte buf[] = new byte[4096];
for (int i = 0; i < buf.length; i++) {
buf[i] = (byte)-1;
}
int pad = paddingLength;
while (pad > 0) {
int toWrite = pad > buf.length ? buf.length : pad;
elos.writeRaw(buf, 0, toWrite);
pad -= toWrite;
}
}
static void addDeleteOpcode(EditLogOutputStream elos,
OpInstanceCache cache, long txId, String path) throws IOException {
DeleteOp op = DeleteOp.getInstance(cache);
op.setTransactionId(txId);
op.setPath(path);
op.setTimestamp(0);
elos.write(op);
}
/**
* Test the scenario where we have an empty edit log.
*
* This class is also useful in testing whether we can correctly handle
* various amounts of padding bytes at the end of the log. We should be
* able to handle any amount of padding (including no padding) without
* throwing an exception.
*/
private static class EltsTestEmptyLog extends EditLogTestSetup {
private final int paddingLength;
public EltsTestEmptyLog(int paddingLength) {
this.paddingLength = paddingLength;
}
public void addTransactionsToLog(EditLogOutputStream elos,
OpInstanceCache cache) throws IOException {
padEditLog(elos, paddingLength);
}
public long getLastValidTxId() {
return -1;
}
public Set<Long> getValidTxIds() {
return new HashSet<Long>();
}
}
/** Test an empty edit log */
@Test(timeout=180000)
public void testEmptyLog() throws IOException {
runEditLogTest(new EltsTestEmptyLog(0));
}
/** Test an empty edit log with padding */
@Test(timeout=180000)
public void testEmptyPaddedLog() throws IOException {
runEditLogTest(new EltsTestEmptyLog(
EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH));
}
/** Test an empty edit log with extra-long padding */
@Test(timeout=180000)
public void testEmptyExtraPaddedLog() throws IOException {
runEditLogTest(new EltsTestEmptyLog(
3 * EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH));
}
/**
* Test using a non-default maximum opcode length.
*/
private static class EltsTestNonDefaultMaxOpSize extends EditLogTestSetup {
public EltsTestNonDefaultMaxOpSize() {
}
@Override
public void addTransactionsToLog(EditLogOutputStream elos,
OpInstanceCache cache) throws IOException {
addDeleteOpcode(elos, cache, 0, "/foo");
addDeleteOpcode(elos, cache, 1,
"/supercalifragalisticexpialadocius.supercalifragalisticexpialadocius");
}
@Override
public long getLastValidTxId() {
return 0;
}
@Override
public Set<Long> getValidTxIds() {
return Sets.newHashSet(0L);
}
@Override
public int getMaxOpSize() {
return 40;
}
}
/** Test an empty edit log with extra-long padding */
@Test(timeout=180000)
public void testNonDefaultMaxOpSize() throws IOException {
runEditLogTest(new EltsTestNonDefaultMaxOpSize());
}
/**
* Test the scenario where an edit log contains some padding (0xff) bytes
* followed by valid opcode data.
*
* These edit logs are corrupt, but all the opcodes should be recoverable
* with recovery mode.
*/
private static class EltsTestOpcodesAfterPadding extends EditLogTestSetup {
private final int paddingLength;
public EltsTestOpcodesAfterPadding(int paddingLength) {
this.paddingLength = paddingLength;
}
public void addTransactionsToLog(EditLogOutputStream elos,
OpInstanceCache cache) throws IOException {
padEditLog(elos, paddingLength);
addDeleteOpcode(elos, cache, 0, "/foo");
}
public long getLastValidTxId() {
return 0;
}
public Set<Long> getValidTxIds() {
return Sets.newHashSet(0L);
}
}
@Test(timeout=180000)
public void testOpcodesAfterPadding() throws IOException {
runEditLogTest(new EltsTestOpcodesAfterPadding(
EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH));
}
@Test(timeout=180000)
public void testOpcodesAfterExtraPadding() throws IOException {
runEditLogTest(new EltsTestOpcodesAfterPadding(
3 * EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH));
}
private static class EltsTestGarbageInEditLog extends EditLogTestSetup {
private final long BAD_TXID = 4;
private final long MAX_TXID = 10;
@Override
public void addTransactionsToLog(EditLogOutputStream elos,
OpInstanceCache cache) throws IOException {
for (long txid = 1; txid <= MAX_TXID; txid++) {
if (txid == BAD_TXID) {
byte garbage[] = { 0x1, 0x2, 0x3 };
elos.writeRaw(garbage, 0, garbage.length);
}
else {
DeleteOp op;
op = DeleteOp.getInstance(cache);
op.setTransactionId(txid);
op.setPath("/foo." + txid);
op.setTimestamp(txid);
elos.write(op);
}
}
}
@Override
public long getLastValidTxId() {
return BAD_TXID - 1;
}
@Override
public Set<Long> getValidTxIds() {
return Sets.newHashSet(1L, 2L, 3L, 5L, 6L, 7L, 8L, 9L, 10L);
}
}
/** Test that we can successfully recover from a situation where there is
* garbage in the middle of the edit log file output stream. */
@Test(timeout=180000)
public void testSkipEdit() throws IOException {
runEditLogTest(new EltsTestGarbageInEditLog());
}
/**
* An algorithm for corrupting an edit log.
*/
interface Corruptor {
/**
* Corrupt an edit log file.
*
* @param editFile The edit log file
*/
public void corrupt(File editFile) throws IOException;
/**
* Explain whether we need to read the log in recovery mode
*
* @param finalized True if the edit log in question is finalized.
* We're a little more lax about reading unfinalized
* logs. We will allow a small amount of garbage at
* the end. In a finalized log, every byte must be
* perfect.
*
* @return Whether we need to read the log in recovery mode
*/
public boolean needRecovery(boolean finalized);
/**
* Get the name of this corruptor
*
* @return The Corruptor name
*/
public String getName();
}
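/**
* Editorial sketch, not used by the tests: the simplest possible Corruptor
* leaves the file untouched and therefore never requires recovery. The real
* corruptors below truncate the log or append padding to it.
*/
static class NoOpCorruptor implements Corruptor {
@Override
public void corrupt(File editFile) throws IOException {
// Deliberately leave the edit log untouched.
}
@Override
public boolean needRecovery(boolean finalized) {
return false;
}
@Override
public String getName() {
return "noop";
}
}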
static class TruncatingCorruptor implements Corruptor {
@Override
public void corrupt(File editFile) throws IOException {
// Corrupt the last edit
long fileLen = editFile.length();
RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
rwf.setLength(fileLen - 1);
rwf.close();
}
@Override
public boolean needRecovery(boolean finalized) {
return finalized;
}
@Override
public String getName() {
return "truncated";
}
}
static class PaddingCorruptor implements Corruptor {
@Override
public void corrupt(File editFile) throws IOException {
// Add junk to the end of the file
RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
rwf.seek(editFile.length());
for (int i = 0; i < 129; i++) {
rwf.write((byte)0);
}
rwf.write(0xd);
rwf.write(0xe);
rwf.write(0xa);
rwf.write(0xd);
rwf.close();
}
@Override
public boolean needRecovery(boolean finalized) {
// With finalized edit logs, we ignore what's at the end as long as we
// can make it to the correct transaction ID.
// With unfinalized edit logs, the finalization process ignores garbage
// at the end.
return false;
}
@Override
public String getName() {
return "padFatal";
}
}
static class SafePaddingCorruptor implements Corruptor {
private final byte padByte;
public SafePaddingCorruptor(byte padByte) {
this.padByte = padByte;
assert ((this.padByte == 0) || (this.padByte == -1));
}
@Override
public void corrupt(File editFile) throws IOException {
// Add junk to the end of the file
RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
rwf.seek(editFile.length());
rwf.write((byte)-1);
for (int i = 0; i < 1024; i++) {
rwf.write(padByte);
}
rwf.close();
}
@Override
public boolean needRecovery(boolean finalized) {
return false;
}
@Override
public String getName() {
return "pad" + ((int)padByte);
}
}
/**
* Create a test configuration that will exercise the initializeGenericKeys
* code path. This is a regression test for HDFS-4279.
*/
static void setupRecoveryTestConf(Configuration conf) throws IOException {
conf.set(DFSConfigKeys.DFS_NAMESERVICES, "ns1");
conf.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX,
"ns1"), "nn1,nn2");
String baseDir = System.getProperty(
MiniDFSCluster.PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
File nameDir = new File(baseDir, "nameR");
File secondaryDir = new File(baseDir, "namesecondaryR");
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
DFS_NAMENODE_NAME_DIR_KEY, "ns1", "nn1"),
nameDir.getCanonicalPath());
conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.
DFS_NAMENODE_CHECKPOINT_DIR_KEY, "ns1", "nn1"),
secondaryDir.getCanonicalPath());
conf.unset(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY);
conf.unset(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
FileUtils.deleteQuietly(nameDir);
if (!nameDir.mkdirs()) {
throw new RuntimeException("failed to make directory " +
nameDir.getAbsolutePath());
}
FileUtils.deleteQuietly(secondaryDir);
if (!secondaryDir.mkdirs()) {
throw new RuntimeException("failed to make directory " +
secondaryDir.getAbsolutePath());
}
}
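/**
* Core recovery scenario: start a cluster, corrupt the most recent edit log
* with the given Corruptor, confirm that normal startup fails exactly when
* the corruptor says recovery is needed, run recovery via
* StartupOption.RECOVER (the command-line equivalent is
* "hdfs namenode -recover"), and verify that the cluster starts cleanly
* afterwards.
*/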
static void testNameNodeRecoveryImpl(Corruptor corruptor, boolean finalize)
throws IOException {
final String TEST_PATH = "/test/path/dir";
final String TEST_PATH2 = "/second/dir";
final boolean needRecovery = corruptor.needRecovery(finalize);
// start a cluster
Configuration conf = new HdfsConfiguration();
setupRecoveryTestConf(conf);
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
StorageDirectory sd = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.manageNameDfsDirs(false).build();
cluster.waitActive();
if (!finalize) {
// Normally, the in-progress edit log would be finalized by
// FSEditLog#endCurrentLogSegment. For testing purposes, we
// disable that here.
FSEditLog spyLog =
spy(cluster.getNameNode().getFSImage().getEditLog());
doNothing().when(spyLog).endCurrentLogSegment(true);
DFSTestUtil.setEditLogForTesting(cluster.getNamesystem(), spyLog);
}
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
fileSys.mkdirs(new Path(TEST_PATH));
fileSys.mkdirs(new Path(TEST_PATH2));
sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
assertTrue("Should exist: " + editFile, editFile.exists());
// Corrupt the edit log
LOG.info("corrupting edit log file '" + editFile + "'");
corruptor.corrupt(editFile);
// If needRecovery == true, make sure that we can't start the
// cluster normally before recovery
cluster = null;
try {
LOG.debug("trying to start normally (this should fail)...");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.enableManagedDfsDirsRedundancy(false).format(false).build();
cluster.waitActive();
cluster.shutdown();
if (needRecovery) {
fail("expected the corrupted edit log to prevent normal startup");
}
} catch (IOException e) {
if (!needRecovery) {
LOG.error("Got unexpected failure with " + corruptor.getName() +
corruptor, e);
fail("got unexpected exception " + e.getMessage());
}
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
// Perform NameNode recovery.
// Even if there was nothing wrong previously (needRecovery == false),
// this should still work fine.
cluster = null;
try {
LOG.debug("running recovery...");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.enableManagedDfsDirsRedundancy(false).format(false)
.startupOption(recoverStartOpt).build();
} catch (IOException e) {
fail("caught IOException while trying to recover. " +
"message was " + e.getMessage() +
"\nstack trace\n" + StringUtils.stringifyException(e));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
// Make sure that we can start the cluster normally after recovery
cluster = null;
try {
LOG.debug("starting cluster normally after recovery...");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.enableManagedDfsDirsRedundancy(false).format(false).build();
LOG.debug("successfully recovered the " + corruptor.getName() +
" corrupted edit log");
cluster.waitActive();
assertTrue(cluster.getFileSystem().exists(new Path(TEST_PATH)));
} catch (IOException e) {
fail("failed to recover. Error message: " + e.getMessage());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/** Test that we can successfully recover from a situation where the last
* entry in the edit log has been truncated. */
@Test(timeout=180000)
public void testRecoverTruncatedEditLog() throws IOException {
testNameNodeRecoveryImpl(new TruncatingCorruptor(), true);
testNameNodeRecoveryImpl(new TruncatingCorruptor(), false);
}
/** Test that we can successfully recover from a situation where the last
* entry in the edit log has been padded with garbage. */
@Test(timeout=180000)
public void testRecoverPaddedEditLog() throws IOException {
testNameNodeRecoveryImpl(new PaddingCorruptor(), true);
testNameNodeRecoveryImpl(new PaddingCorruptor(), false);
}
/** Test that we do not need to recover from a situation where the last
* entry in the edit log has been padded with 0. */
@Test(timeout=180000)
public void testRecoverZeroPaddedEditLog() throws IOException {
testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), true);
testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)0), false);
}
/** Test that we do not need to recover from a situation where the last
* entry in the edit log has been padded with 0xff bytes. */
@Test(timeout=180000)
public void testRecoverNegativeOnePaddedEditLog() throws IOException {
testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), true);
testNameNodeRecoveryImpl(new SafePaddingCorruptor((byte)-1), false);
}
}
| 21,503
| 31.931087
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*;
import static org.junit.Assert.*;
import static org.mockito.Mockito.*;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.Arrays;
import java.util.Collections;
import java.util.Map;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
import org.junit.Before;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
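/**
* Tests StartupProgressServlet by mocking the servlet context, request, and
* response, then comparing the JSON the servlet renders (minus the
* unpredictable elapsedTime fields) against the expected phase and step
* structures.
*/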
public class TestStartupProgressServlet {
private HttpServletRequest req;
private HttpServletResponse resp;
private ByteArrayOutputStream respOut;
private StartupProgress startupProgress;
private StartupProgressServlet servlet;
@Before
public void setUp() throws Exception {
startupProgress = new StartupProgress();
ServletContext context = mock(ServletContext.class);
when(context.getAttribute(NameNodeHttpServer.STARTUP_PROGRESS_ATTRIBUTE_KEY))
.thenReturn(startupProgress);
servlet = mock(StartupProgressServlet.class);
when(servlet.getServletContext()).thenReturn(context);
doCallRealMethod().when(servlet).doGet(any(HttpServletRequest.class),
any(HttpServletResponse.class));
req = mock(HttpServletRequest.class);
respOut = new ByteArrayOutputStream();
PrintWriter writer = new PrintWriter(respOut);
resp = mock(HttpServletResponse.class);
when(resp.getWriter()).thenReturn(writer);
}
@Test
public void testInitialState() throws Exception {
String respBody = doGetAndReturnResponseBody();
assertNotNull(respBody);
Map<String, Object> expected = ImmutableMap.<String, Object>builder()
.put("percentComplete", 0.0f)
.put("phases", Arrays.<Object>asList(
ImmutableMap.<String, Object>builder()
.put("name", "LoadingFsImage")
.put("desc", "Loading fsimage")
.put("status", "PENDING")
.put("percentComplete", 0.0f)
.put("steps", Collections.emptyList())
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "LoadingEdits")
.put("desc", "Loading edits")
.put("status", "PENDING")
.put("percentComplete", 0.0f)
.put("steps", Collections.emptyList())
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "SavingCheckpoint")
.put("desc", "Saving checkpoint")
.put("status", "PENDING")
.put("percentComplete", 0.0f)
.put("steps", Collections.emptyList())
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "SafeMode")
.put("desc", "Safe mode")
.put("status", "PENDING")
.put("percentComplete", 0.0f)
.put("steps", Collections.emptyList())
.build()))
.build();
assertEquals(JSON.toString(expected), filterJson(respBody));
}
@Test
public void testRunningState() throws Exception {
setStartupProgressForRunningState(startupProgress);
String respBody = doGetAndReturnResponseBody();
assertNotNull(respBody);
Map<String, Object> expected = ImmutableMap.<String, Object>builder()
.put("percentComplete", 0.375f)
.put("phases", Arrays.<Object>asList(
ImmutableMap.<String, Object>builder()
.put("name", "LoadingFsImage")
.put("desc", "Loading fsimage")
.put("status", "COMPLETE")
.put("percentComplete", 1.0f)
.put("steps", Collections.<Object>singletonList(
ImmutableMap.<String, Object>builder()
.put("name", "Inodes")
.put("desc", "inodes")
.put("count", 100L)
.put("total", 100L)
.put("percentComplete", 1.0f)
.build()
))
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "LoadingEdits")
.put("desc", "Loading edits")
.put("status", "RUNNING")
.put("percentComplete", 0.5f)
.put("steps", Collections.<Object>singletonList(
ImmutableMap.<String, Object>builder()
.put("count", 100L)
.put("file", "file")
.put("size", 1000L)
.put("total", 200L)
.put("percentComplete", 0.5f)
.build()
))
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "SavingCheckpoint")
.put("desc", "Saving checkpoint")
.put("status", "PENDING")
.put("percentComplete", 0.0f)
.put("steps", Collections.emptyList())
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "SafeMode")
.put("desc", "Safe mode")
.put("status", "PENDING")
.put("percentComplete", 0.0f)
.put("steps", Collections.emptyList())
.build()))
.build();
assertEquals(JSON.toString(expected), filterJson(respBody));
}
@Test
public void testFinalState() throws Exception {
setStartupProgressForFinalState(startupProgress);
String respBody = doGetAndReturnResponseBody();
assertNotNull(respBody);
Map<String, Object> expected = ImmutableMap.<String, Object>builder()
.put("percentComplete", 1.0f)
.put("phases", Arrays.<Object>asList(
ImmutableMap.<String, Object>builder()
.put("name", "LoadingFsImage")
.put("desc", "Loading fsimage")
.put("status", "COMPLETE")
.put("percentComplete", 1.0f)
.put("steps", Collections.<Object>singletonList(
ImmutableMap.<String, Object>builder()
.put("name", "Inodes")
.put("desc", "inodes")
.put("count", 100L)
.put("total", 100L)
.put("percentComplete", 1.0f)
.build()
))
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "LoadingEdits")
.put("desc", "Loading edits")
.put("status", "COMPLETE")
.put("percentComplete", 1.0f)
.put("steps", Collections.<Object>singletonList(
ImmutableMap.<String, Object>builder()
.put("count", 200L)
.put("file", "file")
.put("size", 1000L)
.put("total", 200L)
.put("percentComplete", 1.0f)
.build()
))
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "SavingCheckpoint")
.put("desc", "Saving checkpoint")
.put("status", "COMPLETE")
.put("percentComplete", 1.0f)
.put("steps", Collections.<Object>singletonList(
ImmutableMap.<String, Object>builder()
.put("name", "Inodes")
.put("desc", "inodes")
.put("count", 300L)
.put("total", 300L)
.put("percentComplete", 1.0f)
.build()
))
.build(),
ImmutableMap.<String, Object>builder()
.put("name", "SafeMode")
.put("desc", "Safe mode")
.put("status", "COMPLETE")
.put("percentComplete", 1.0f)
.put("steps", Collections.<Object>singletonList(
ImmutableMap.<String, Object>builder()
.put("name", "AwaitingReportedBlocks")
.put("desc", "awaiting reported blocks")
.put("count", 400L)
.put("total", 400L)
.put("percentComplete", 1.0f)
.build()
))
.build()))
.build();
assertEquals(JSON.toString(expected), filterJson(respBody));
}
/**
* Calls doGet on the servlet, captures the response body as a string, and
* returns it to the caller.
*
* @return String response body
* @throws IOException thrown if there is an I/O error
*/
private String doGetAndReturnResponseBody() throws IOException {
servlet.doGet(req, resp);
return new String(respOut.toByteArray(), "UTF-8");
}
/**
* Filters the given JSON response body, removing elements that would impede
* testing. Specifically, it removes elapsedTime fields, because we cannot
* predict the exact values.
*
* @param str String to filter
* @return String filtered value
*/
private String filterJson(String str) {
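// Example: {"elapsedTime":5,"name":"x"} becomes {"name":"x"}, and a
// trailing ,"elapsedTime":5 inside an object is dropped as well.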
return str.replaceAll("\"elapsedTime\":\\d+\\,", "")
.replaceAll("\\,\"elapsedTime\":\\d+", "");
}
}
| 9,558
| 35.208333
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditsDoubleBuffer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.io.DataOutputBuffer;
import org.junit.Test;
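/**
* Tests EditsDoubleBuffer: callers append to the "current" buffer while a
* previously swapped buffer is flushed to disk. setReadyToFlush() swaps the
* two buffers and flushTo() drains the ready-to-flush side.
*/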
public class TestEditsDoubleBuffer {
@Test
public void testDoubleBuffer() throws IOException {
EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
assertTrue(buf.isFlushed());
byte[] data = new byte[100];
buf.writeRaw(data, 0, data.length);
assertEquals("Should count new data correctly",
data.length, buf.countBufferedBytes());
assertTrue("Writing to current buffer should not affect flush state",
buf.isFlushed());
// Swap the buffers
buf.setReadyToFlush();
assertEquals("Swapping buffers should still count buffered bytes",
data.length, buf.countBufferedBytes());
assertFalse(buf.isFlushed());
// Flush to a stream
DataOutputBuffer outBuf = new DataOutputBuffer();
buf.flushTo(outBuf);
assertEquals(data.length, outBuf.getLength());
assertTrue(buf.isFlushed());
assertEquals(0, buf.countBufferedBytes());
// Write some more
buf.writeRaw(data, 0, data.length);
assertEquals("Should count new data correctly",
data.length, buf.countBufferedBytes());
buf.setReadyToFlush();
buf.flushTo(outBuf);
assertEquals(data.length * 2, outBuf.getLength());
assertEquals(0, buf.countBufferedBytes());
outBuf.close();
}
@Test
public void shouldFailToCloseWhenUnflushed() throws IOException {
EditsDoubleBuffer buf = new EditsDoubleBuffer(1024);
buf.writeRaw(new byte[1], 0, 1);
try {
buf.close();
fail("Did not fail to close with unflushed data");
} catch (IOException ioe) {
if (!ioe.toString().contains("still to be flushed")) {
throw ioe;
}
}
}
}
| 2,783
| 31.752941
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.EnumSetWritable;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
/**
* Race between two threads simultaneously calling
* FSNamesystem.getAdditionalBlock().
*/
public class TestAddBlockRetry {
public static final Log LOG = LogFactory.getLog(TestAddBlockRetry.class);
private static final short REPLICATION = 3;
private Configuration conf;
private MiniDFSCluster cluster;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Retry addBlock() while another thread is in chooseTarget().
* See HDFS-4452.
*/
@Test
public void testRetryAddBlockWhileInChooseTarget() throws Exception {
final String src = "/testRetryAddBlockWhileInChooseTarget";
final FSNamesystem ns = cluster.getNamesystem();
final NamenodeProtocols nn = cluster.getNameNodeRpc();
// create file
nn.create(src, FsPermission.getFileDefault(),
"clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)),
true, (short)3, 1024, null);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
LocatedBlock[] onRetryBlock = new LocatedBlock[1];
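// Out-parameter: validateAddBlock fills this with the previously allocated
// block when it detects that the incoming call is a retry.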
ns.readLock();
FSDirWriteFileOp.ValidateAddBlockResult r;
FSPermissionChecker pc = Mockito.mock(FSPermissionChecker.class);
try {
r = FSDirWriteFileOp.validateAddBlock(ns, pc, src,
HdfsConstants.GRANDFATHER_INODE_ID,
"clientName", null, onRetryBlock);
} finally {
ns.readUnlock();
}
DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
ns.getBlockManager(), src, null, null, r);
assertNotNull("Targets must be generated", targets);
// run second addBlock()
LOG.info("Starting second addBlock for " + src);
nn.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null);
assertTrue("Penultimate block must be complete",
checkFileProgress(src, false));
LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
LocatedBlock lb2 = lbs.get(0);
assertEquals("Wrong replication", REPLICATION, lb2.getLocations().length);
// continue first addBlock()
ns.writeLock();
LocatedBlock newBlock;
try {
newBlock = FSDirWriteFileOp.storeAllocatedBlock(ns, src,
HdfsConstants.GRANDFATHER_INODE_ID, "clientName", null, targets);
} finally {
ns.writeUnlock();
}
assertEquals("Blocks are not equal", lb2.getBlock(), newBlock.getBlock());
// check locations
lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
assertEquals("Must be one block", 1, lbs.getLocatedBlocks().size());
LocatedBlock lb1 = lbs.get(0);
assertEquals("Wrong replication", REPLICATION, lb1.getLocations().length);
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
}
boolean checkFileProgress(String src, boolean checkall) throws IOException {
final FSNamesystem ns = cluster.getNamesystem();
ns.readLock();
try {
return ns.checkFileProgress(src, ns.dir.getINode(src).asFile(), checkall);
} finally {
ns.readUnlock();
}
}
/*
* Since the NameNode does not persist block locations, an addBlock()
* retry issued after a NameNode restart should re-select the locations
* and return them to the client. See HDFS-5257.
*/
@Test
public void testAddBlockRetryShouldReturnBlockWithLocations()
throws Exception {
final String src = "/testAddBlockRetryShouldReturnBlockWithLocations";
NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
// create file
nameNodeRpc.create(src, FsPermission.getFileDefault(), "clientName",
new EnumSetWritable<CreateFlag>(EnumSet.of(CreateFlag.CREATE)), true,
(short) 3, 1024, null);
// start first addBlock()
LOG.info("Starting first addBlock for " + src);
LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null);
assertTrue("Block locations should be present",
lb1.getLocations().length > 0);
cluster.restartNameNode();
nameNodeRpc = cluster.getNameNodeRpc();
LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null);
assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
}
}
| 6,522
| 36.274286
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.net.URISyntaxException;
import java.util.HashMap;
import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume;
import org.apache.hadoop.test.PathUtils;
import org.apache.hadoop.util.Time;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
public class TestNameNodeResourceChecker {
private final static File BASE_DIR = PathUtils.getTestDir(TestNameNodeResourceChecker.class);
private Configuration conf;
private File baseDir;
private File nameDir;
@Before
public void setUp() throws IOException {
conf = new Configuration();
nameDir = new File(BASE_DIR, "resource-check-name-dir");
nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
}
/**
* Tests that hasAvailableDiskSpace returns true if disk usage is below
* threshold.
*/
@Test
public void testCheckAvailability()
throws IOException {
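// DFS_NAMENODE_DU_RESERVED_KEY is the number of bytes that must remain
// free on each checked volume; 0 means any amount of free space passes.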
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, 0);
NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
assertTrue(
"isResourceAvailable must return true if " +
"disk usage is lower than threshold",
nb.hasAvailableDiskSpace());
}
/**
* Tests that hasAvailableDiskSpace returns false if disk usage is above
* threshold.
*/
@Test
public void testCheckAvailabilityNeg() throws IOException {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
assertFalse(
"isResourceAvailable must return false if " +
"disk usage is higher than threshold",
nb.hasAvailableDiskSpace());
}
/**
* Tests that NameNode resource monitor causes the NN to enter safe mode when
* resources are low.
*/
@Test
public void testCheckThatNameNodeResourceMonitorIsRunning()
throws IOException, InterruptedException {
MiniDFSCluster cluster = null;
try {
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
conf.setLong(DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(1).build();
NameNodeResourceChecker mockResourceChecker = Mockito.mock(NameNodeResourceChecker.class);
Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(true);
cluster.getNameNode().getNamesystem().nnResourceChecker = mockResourceChecker;
cluster.waitActive();
String name = NameNodeResourceMonitor.class.getName();
boolean isNameNodeMonitorRunning = false;
Set<Thread> runningThreads = Thread.getAllStackTraces().keySet();
for (Thread runningThread : runningThreads) {
if (runningThread.toString().startsWith("Thread[" + name)) {
isNameNodeMonitorRunning = true;
break;
}
}
assertTrue("NN resource monitor should be running",
isNameNodeMonitorRunning);
assertFalse("NN should not presently be in safe mode",
cluster.getNameNode().isInSafeMode());
Mockito.when(mockResourceChecker.hasAvailableDiskSpace()).thenReturn(false);
// Make sure the NNRM thread has a chance to run.
long startMillis = Time.now();
while (!cluster.getNameNode().isInSafeMode() &&
Time.now() < startMillis + (60 * 1000)) {
Thread.sleep(1000);
}
assertTrue("NN should be in safe mode after resources crossed threshold",
cluster.getNameNode().isInSafeMode());
} finally {
if (cluster != null)
cluster.shutdown();
}
}
/**
* Tests that only a single space check is performed if two name dirs are
* supplied which are on the same volume.
*/
@Test
public void testChecking2NameDirsOnOneVolume() throws IOException {
Configuration conf = new Configuration();
File nameDir1 = new File(BASE_DIR, "name-dir1");
File nameDir2 = new File(BASE_DIR, "name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
assertEquals("Should not check the same volume more than once.",
1, nb.getVolumesLowOnSpace().size());
}
/**
* Tests that only a single space check is performed if extra volumes are
* configured manually which also coincide with a volume the name dir is on.
*/
@Test
public void testCheckingExtraVolumes() throws IOException {
Configuration conf = new Configuration();
File nameDir = new File(BASE_DIR, "name-dir");
nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
conf.setLong(DFSConfigKeys.DFS_NAMENODE_DU_RESERVED_KEY, Long.MAX_VALUE);
NameNodeResourceChecker nb = new NameNodeResourceChecker(conf);
assertEquals("Should not check the same volume more than once.",
1, nb.getVolumesLowOnSpace().size());
}
/**
* Test that the NN is considered to be out of resources only once all
* redundant configured volumes are low on resources, or when any required
* volume is low on resources.
*/
@Test
public void testLowResourceVolumePolicy() throws IOException, URISyntaxException {
Configuration conf = new Configuration();
File nameDir1 = new File(BASE_DIR, "name-dir1");
File nameDir2 = new File(BASE_DIR, "name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_MINIMUM_KEY, 2);
NameNodeResourceChecker nnrc = new NameNodeResourceChecker(conf);
// For the purpose of this test, we need to force the name dirs to appear to
// be on different volumes.
Map<String, CheckedVolume> volumes = new HashMap<String, CheckedVolume>();
CheckedVolume volume1 = Mockito.mock(CheckedVolume.class);
CheckedVolume volume2 = Mockito.mock(CheckedVolume.class);
CheckedVolume volume3 = Mockito.mock(CheckedVolume.class);
CheckedVolume volume4 = Mockito.mock(CheckedVolume.class);
CheckedVolume volume5 = Mockito.mock(CheckedVolume.class);
Mockito.when(volume1.isResourceAvailable()).thenReturn(true);
Mockito.when(volume2.isResourceAvailable()).thenReturn(true);
Mockito.when(volume3.isResourceAvailable()).thenReturn(true);
Mockito.when(volume4.isResourceAvailable()).thenReturn(true);
Mockito.when(volume5.isResourceAvailable()).thenReturn(true);
// Make volumes 4 and 5 required.
Mockito.when(volume4.isRequired()).thenReturn(true);
Mockito.when(volume5.isRequired()).thenReturn(true);
volumes.put("volume1", volume1);
volumes.put("volume2", volume2);
volumes.put("volume3", volume3);
volumes.put("volume4", volume4);
volumes.put("volume5", volume5);
nnrc.setVolumes(volumes);
// Initially all dirs have space.
assertTrue(nnrc.hasAvailableDiskSpace());
// 1/3 redundant dir is low on space.
Mockito.when(volume1.isResourceAvailable()).thenReturn(false);
assertTrue(nnrc.hasAvailableDiskSpace());
// 2/3 redundant dirs are low on space.
Mockito.when(volume2.isResourceAvailable()).thenReturn(false);
assertFalse(nnrc.hasAvailableDiskSpace());
// Lower the minimum number of redundant volumes that must be available.
nnrc.setMinimumReduntdantVolumes(1);
assertTrue(nnrc.hasAvailableDiskSpace());
// Just one required dir is low on space.
Mockito.when(volume3.isResourceAvailable()).thenReturn(false);
assertFalse(nnrc.hasAvailableDiskSpace());
// Just the other required dir is low on space.
Mockito.when(volume3.isResourceAvailable()).thenReturn(true);
Mockito.when(volume4.isResourceAvailable()).thenReturn(false);
assertFalse(nnrc.hasAvailableDiskSpace());
}
}
| 9,563
| 37.720648
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* Test that the NameNode RPC server can be told, via
* "dfs.namenode.rpc-bind-host", to bind its client RPC listener to an
* address (such as the wildcard 0.0.0.0) other than the advertised one.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
import static org.junit.Assert.assertEquals;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
public class TestNameNodeRpcServer {
@Test
public void testNamenodeRpcBindAny() throws IOException {
Configuration conf = new HdfsConfiguration();
// The name node in MiniDFSCluster only binds to 127.0.0.1.
// We can set the bind address to 0.0.0.0 to make it listen
// to all interfaces.
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
.getClientRpcServer().getListenerAddress().getHostName());
} finally {
if (cluster != null) {
cluster.shutdown();
}
// Reset the config
conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY);
}
}
}
| 2,278
| 34.609375
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeAcl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import org.apache.hadoop.conf.Configuration;
import org.junit.BeforeClass;
/**
* Tests NameNode interaction for all ACL modification APIs. This test suite
* also covers interaction of setPermission with inodes that have ACLs.
*/
public class TestNameNodeAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
startCluster();
}
}
| 1,264
| 35.142857
| 77
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeOptionParsing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.RollingUpgradeStartupOption;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.Assert;
import org.junit.Test;
public class TestNameNodeOptionParsing {
@Test(timeout = 10000)
public void testUpgrade() {
StartupOption opt = null;
// UPGRADE is set, but nothing else
opt = NameNode.parseArguments(new String[] {"-upgrade"});
assertEquals(opt, StartupOption.UPGRADE);
assertNull(opt.getClusterId());
assertTrue(FSImageFormat.renameReservedMap.isEmpty());
// cluster ID is set
opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
"mycid" });
assertEquals(StartupOption.UPGRADE, opt);
assertEquals("mycid", opt.getClusterId());
assertTrue(FSImageFormat.renameReservedMap.isEmpty());
// Everything is set
opt = NameNode.parseArguments(new String[] { "-upgrade", "-clusterid",
"mycid", "-renameReserved",
".snapshot=.my-snapshot,.reserved=.my-reserved" });
assertEquals(StartupOption.UPGRADE, opt);
assertEquals("mycid", opt.getClusterId());
assertEquals(".my-snapshot",
FSImageFormat.renameReservedMap.get(".snapshot"));
assertEquals(".my-reserved",
FSImageFormat.renameReservedMap.get(".reserved"));
// Reset the map
FSImageFormat.renameReservedMap.clear();
// Everything is set, but in a different order
opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".reserved=.my-reserved,.snapshot=.my-snapshot", "-clusterid",
"mycid"});
assertEquals(StartupOption.UPGRADE, opt);
assertEquals("mycid", opt.getClusterId());
assertEquals(".my-snapshot",
FSImageFormat.renameReservedMap.get(".snapshot"));
assertEquals(".my-reserved",
FSImageFormat.renameReservedMap.get(".reserved"));
// Try the default renameReserved
opt = NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved"});
assertEquals(StartupOption.UPGRADE, opt);
assertEquals(
".snapshot." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION
+ ".UPGRADE_RENAMED",
FSImageFormat.renameReservedMap.get(".snapshot"));
assertEquals(
".reserved." + HdfsServerConstants.NAMENODE_LAYOUT_VERSION
+ ".UPGRADE_RENAMED",
FSImageFormat.renameReservedMap.get(".reserved"));
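    // Illustratively, if NAMENODE_LAYOUT_VERSION were -60, the default
    // mapping above would rename ".snapshot" to
    // ".snapshot.-60.UPGRADE_RENAMED", and ".reserved" likewise.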
// Try some error conditions
try {
opt =
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".reserved=.my-reserved,.not-reserved=.my-not-reserved" });
} catch (IllegalArgumentException e) {
assertExceptionContains("Unknown reserved path", e);
}
try {
opt =
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".reserved=.my-reserved,.snapshot=.snapshot" });
} catch (IllegalArgumentException e) {
assertExceptionContains("Invalid rename path", e);
}
try {
opt =
NameNode.parseArguments(new String[] { "-upgrade", "-renameReserved",
".snapshot=.reserved" });
} catch (IllegalArgumentException e) {
assertExceptionContains("Invalid rename path", e);
}
opt = NameNode.parseArguments(new String[] { "-upgrade", "-cid"});
assertNull(opt);
}
@Test(timeout = 10000)
public void testRollingUpgrade() {
{
final String[] args = {"-rollingUpgrade"};
final StartupOption opt = NameNode.parseArguments(args);
assertNull(opt);
}
{
final String[] args = {"-rollingUpgrade", "started"};
final StartupOption opt = NameNode.parseArguments(args);
assertEquals(StartupOption.ROLLINGUPGRADE, opt);
assertEquals(RollingUpgradeStartupOption.STARTED, opt.getRollingUpgradeStartupOption());
assertTrue(RollingUpgradeStartupOption.STARTED.matches(opt));
}
{
final String[] args = {"-rollingUpgrade", "downgrade"};
final StartupOption opt = NameNode.parseArguments(args);
assertEquals(StartupOption.ROLLINGUPGRADE, opt);
assertEquals(RollingUpgradeStartupOption.DOWNGRADE, opt.getRollingUpgradeStartupOption());
assertTrue(RollingUpgradeStartupOption.DOWNGRADE.matches(opt));
}
{
final String[] args = {"-rollingUpgrade", "rollback"};
final StartupOption opt = NameNode.parseArguments(args);
assertEquals(StartupOption.ROLLINGUPGRADE, opt);
assertEquals(RollingUpgradeStartupOption.ROLLBACK, opt.getRollingUpgradeStartupOption());
assertTrue(RollingUpgradeStartupOption.ROLLBACK.matches(opt));
}
{
final String[] args = {"-rollingUpgrade", "foo"};
try {
NameNode.parseArguments(args);
Assert.fail();
} catch(IllegalArgumentException iae) {
// the exception is expected.
}
}
}
}
| 6,014
| 38.834437
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyLong;
import static org.mockito.Matchers.anyString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileReader;
import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;
import java.io.RandomAccessFile;
import java.io.StringWriter;
import java.io.Writer;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.channels.FileChannel;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.Result;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ToolRunner;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.apache.log4j.PatternLayout;
import org.apache.log4j.RollingFileAppender;
import org.junit.Test;
import com.google.common.collect.Sets;
/**
* A JUnit test for doing fsck
*/
public class TestFsck {
static final String auditLogFile = System.getProperty("test.build.dir",
"build/test") + "/TestFsck-audit.log";
// Pattern for:
  // allowed=true ugi=name ip=/address cmd=fsck src=/ dst=null perm=null proto=...
static final Pattern fsckPattern = Pattern.compile(
"allowed=.*?\\s" +
"ugi=.*?\\s" +
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=fsck\\ssrc=\\/\\sdst=null\\s" +
"perm=null\\s" + "proto=.*");
static final Pattern getfileinfoPattern = Pattern.compile(
"allowed=.*?\\s" +
"ugi=.*?\\s" +
"ip=/\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\s" +
"cmd=getfileinfo\\ssrc=\\/\\sdst=null\\s" +
"perm=null\\s" + "proto=.*");
static final Pattern numCorruptBlocksPattern = Pattern.compile(
".*Corrupt blocks:\t\t([0123456789]*).*");
private static final String LINE_SEPARATOR =
System.getProperty("line.separator");
static String runFsck(Configuration conf, int expectedErrCode,
      boolean checkErrorCode, String... path)
throws Exception {
ByteArrayOutputStream bStream = new ByteArrayOutputStream();
PrintStream out = new PrintStream(bStream, true);
((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.ALL);
int errCode = ToolRunner.run(new DFSck(conf, out), path);
if (checkErrorCode) {
assertEquals(expectedErrCode, errCode);
}
((Log4JLogger)FSPermissionChecker.LOG).getLogger().setLevel(Level.INFO);
FSImage.LOG.error("OUTPUT = " + bStream.toString());
return bStream.toString();
}
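  // Typical usage (see the tests below): runFsck(conf, 0, true, "/") runs
  // DFSck against "/", asserts that it exits with code 0, and returns the
  // captured fsck output for further string assertions.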
  /** Run fsck on a healthy filesystem, then again after its datanodes have
   *  been taken down, and verify the output and audit logging. */
@Test
public void testFsck() throws Exception {
DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
setNumFiles(20).build();
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs = cluster.getFileSystem();
final String fileName = "/srcdat";
util.createFiles(fs, fileName);
util.waitReplication(fs, fileName, (short)3);
final Path file = new Path(fileName);
long aTime = fs.getFileStatus(file).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr = runFsck(conf, 0, true, "/");
verifyAuditLogs();
assertEquals(aTime, fs.getFileStatus(file).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
if (fs != null) {try{fs.close();} catch(Exception e){}}
cluster.shutdown();
// restart the cluster; bring up namenode but not the data nodes
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(0).format(false).build();
outStr = runFsck(conf, 1, true, "/");
// expect the result is corrupt
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
System.out.println(outStr);
// bring up data nodes & cleanup cluster
cluster.startDataNodes(conf, 4, true, null, null);
cluster.waitActive();
cluster.waitClusterUp();
fs = cluster.getFileSystem();
util.cleanup(fs, "/srcdat");
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
/** Sets up log4j logger for auditlogs */
private void setupAuditLogs() throws IOException {
File file = new File(auditLogFile);
if (file.exists()) {
file.delete();
}
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.INFO);
PatternLayout layout = new PatternLayout("%m%n");
RollingFileAppender appender = new RollingFileAppender(layout, auditLogFile);
logger.addAppender(appender);
}
private void verifyAuditLogs() throws IOException {
// Turn off the logs
Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
logger.setLevel(Level.OFF);
BufferedReader reader = null;
try {
// Audit log should contain one getfileinfo and one fsck
reader = new BufferedReader(new FileReader(auditLogFile));
String line;
      // one extra getfileinfo event stems from resolving the path
for (int i = 0; i < 2; i++) {
line = reader.readLine();
assertNotNull(line);
assertTrue("Expected getfileinfo event not found in audit log",
getfileinfoPattern.matcher(line).matches());
}
line = reader.readLine();
assertNotNull(line);
assertTrue("Expected fsck event not found in audit log", fsckPattern
.matcher(line).matches());
assertNull("Unexpected event in audit log", reader.readLine());
} finally {
// Close the reader and remove the appender to release the audit log file
// handle after verifying the content of the file.
if (reader != null) {
reader.close();
}
if (logger != null) {
logger.removeAllAppenders();
}
}
}
@Test
public void testFsckNonExistent() throws Exception {
DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
setNumFiles(20).build();
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs = cluster.getFileSystem();
util.createFiles(fs, "/srcdat");
util.waitReplication(fs, "/srcdat", (short)3);
String outStr = runFsck(conf, 0, true, "/non-existent");
assertEquals(-1, outStr.indexOf(NamenodeFsck.HEALTHY_STATUS));
System.out.println(outStr);
util.cleanup(fs, "/srcdat");
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
/** Test fsck with permission set on inodes */
@Test
public void testFsckPermission() throws Exception {
final DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(20).build();
final Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
MiniDFSCluster cluster = null;
try {
// Create a cluster with the current user, write some files
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
final MiniDFSCluster c2 = cluster;
final String dir = "/dfsck";
final Path dirpath = new Path(dir);
final FileSystem fs = c2.getFileSystem();
util.createFiles(fs, dir);
util.waitReplication(fs, dir, (short) 3);
fs.setPermission(dirpath, new FsPermission((short) 0700));
// run DFSck as another user, should fail with permission issue
UserGroupInformation fakeUGI = UserGroupInformation.createUserForTesting(
"ProbablyNotARealUserName", new String[] { "ShangriLa" });
fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
System.out.println(runFsck(conf, -1, true, dir));
return null;
}
});
// set permission and try DFSck again as the fake user, should succeed
fs.setPermission(dirpath, new FsPermission((short) 0777));
fakeUGI.doAs(new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws Exception {
final String outStr = runFsck(conf, 0, true, dir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
return null;
}
});
util.cleanup(fs, dir);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testFsckMove() throws Exception {
Configuration conf = new HdfsConfiguration();
final int DFS_BLOCK_SIZE = 1024;
final int NUM_DATANODES = 4;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFS_BLOCK_SIZE);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
DFSTestUtil util = new DFSTestUtil("TestFsck", 5, 3,
(5 * DFS_BLOCK_SIZE) + (DFS_BLOCK_SIZE - 1), 5 * DFS_BLOCK_SIZE);
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(NUM_DATANODES).build();
String topDir = "/srcdat";
fs = cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs, topDir);
util.waitReplication(fs, topDir, (short)3);
String outStr = runFsck(conf, 0, true, "/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
String fileNames[] = util.getFileNames(topDir);
CorruptedTestFile ctFiles[] = new CorruptedTestFile[] {
new CorruptedTestFile(fileNames[0], Sets.newHashSet(0),
dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
new CorruptedTestFile(fileNames[1], Sets.newHashSet(2, 3),
dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
new CorruptedTestFile(fileNames[2], Sets.newHashSet(4),
dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
new CorruptedTestFile(fileNames[3], Sets.newHashSet(0, 1, 2, 3),
dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE),
new CorruptedTestFile(fileNames[4], Sets.newHashSet(1, 2, 3, 4),
dfsClient, NUM_DATANODES, DFS_BLOCK_SIZE)
};
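      // The block indices above delete single blocks as well as consecutive
      // runs at different positions, so each file's surviving blocks form
      // one or more lost+found chains for checkSalvagedRemains() to verify.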
int totalMissingBlocks = 0;
for (CorruptedTestFile ctFile : ctFiles) {
totalMissingBlocks += ctFile.getTotalMissingBlocks();
}
for (CorruptedTestFile ctFile : ctFiles) {
ctFile.removeBlocks(cluster);
}
// Wait for fsck to discover all the missing blocks
while (true) {
outStr = runFsck(conf, 1, false, "/");
String numCorrupt = null;
for (String line : outStr.split(LINE_SEPARATOR)) {
Matcher m = numCorruptBlocksPattern.matcher(line);
if (m.matches()) {
numCorrupt = m.group(1);
break;
}
}
if (numCorrupt == null) {
throw new IOException("failed to find number of corrupt " +
"blocks in fsck output.");
}
if (numCorrupt.equals(Integer.toString(totalMissingBlocks))) {
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
break;
}
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
}
}
// Copy the non-corrupt blocks of corruptFileName to lost+found.
outStr = runFsck(conf, 1, false, "/", "-move");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
// Make sure that we properly copied the block files from the DataNodes
// to lost+found
for (CorruptedTestFile ctFile : ctFiles) {
ctFile.checkSalvagedRemains();
}
// Fix the filesystem by removing corruptFileName
outStr = runFsck(conf, 1, true, "/", "-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
// Check to make sure we have a healthy filesystem
outStr = runFsck(conf, 0, true, "/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs, topDir);
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
static private class CorruptedTestFile {
final private String name;
final private Set<Integer> blocksToCorrupt;
final private DFSClient dfsClient;
final private int numDataNodes;
final private int blockSize;
final private byte[] initialContents;
public CorruptedTestFile(String name, Set<Integer> blocksToCorrupt,
DFSClient dfsClient, int numDataNodes, int blockSize)
throws IOException {
this.name = name;
this.blocksToCorrupt = blocksToCorrupt;
this.dfsClient = dfsClient;
this.numDataNodes = numDataNodes;
this.blockSize = blockSize;
this.initialContents = cacheInitialContents();
}
public int getTotalMissingBlocks() {
return blocksToCorrupt.size();
}
private byte[] cacheInitialContents() throws IOException {
HdfsFileStatus status = dfsClient.getFileInfo(name);
byte[] content = new byte[(int)status.getLen()];
DFSInputStream in = null;
try {
in = dfsClient.open(name);
IOUtils.readFully(in, content, 0, content.length);
} finally {
in.close();
}
return content;
}
public void removeBlocks(MiniDFSCluster cluster)
throws AccessControlException, FileNotFoundException,
UnresolvedLinkException, IOException {
for (int corruptIdx : blocksToCorrupt) {
// Corrupt a block by deleting it
ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
name, blockSize * corruptIdx, Long.MAX_VALUE).get(0).getBlock();
for (int i = 0; i < numDataNodes; i++) {
File blockFile = cluster.getBlockFile(i, block);
if(blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete());
}
}
}
}
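    /**
     * Verify the data that "fsck -move" salvaged into /lost+found. Per this
     * test's expectations, each contiguous run of surviving (non-deleted)
     * blocks reappears as a separate file /lost+found/&lt;original
     * path&gt;/&lt;chain index&gt;, whose contents must match the bytes
     * cached before corruption.
     */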
public void checkSalvagedRemains() throws IOException {
int chainIdx = 0;
HdfsFileStatus status = dfsClient.getFileInfo(name);
long length = status.getLen();
int numBlocks = (int)((length + blockSize - 1) / blockSize);
DFSInputStream in = null;
byte[] blockBuffer = new byte[blockSize];
try {
for (int blockIdx = 0; blockIdx < numBlocks; blockIdx++) {
if (blocksToCorrupt.contains(blockIdx)) {
if (in != null) {
in.close();
in = null;
}
continue;
}
if (in == null) {
in = dfsClient.open("/lost+found" + name + "/" + chainIdx);
chainIdx++;
}
int len = blockBuffer.length;
if (blockIdx == (numBlocks - 1)) {
// The last block might not be full-length
len = (int)(in.getFileLength() % blockSize);
if (len == 0) len = blockBuffer.length;
}
IOUtils.readFully(in, blockBuffer, 0, len);
int startIdx = blockIdx * blockSize;
for (int i = 0; i < len; i++) {
if (initialContents[startIdx + i] != blockBuffer[i]) {
throw new IOException("salvaged file " + name + " differed " +
"from what we expected on block " + blockIdx);
}
}
}
} finally {
IOUtils.cleanup(null, in);
}
}
}
@Test
public void testFsckMoveAndDelete() throws Exception {
final int MAX_MOVE_TRIES = 5;
DFSTestUtil util = new DFSTestUtil.Builder().
setName("TestFsckMoveAndDelete").setNumFiles(5).build();
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir = "/srcdat";
fs = cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs, topDir);
util.waitReplication(fs, topDir, (short)3);
String outStr = runFsck(conf, 0, true, "/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Corrupt a block by deleting it
String[] fileNames = util.getFileNames(topDir);
DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
String corruptFileName = fileNames[0];
ExtendedBlock block = dfsClient.getNamenode().getBlockLocations(
corruptFileName, 0, Long.MAX_VALUE).get(0).getBlock();
for (int i=0; i<4; i++) {
File blockFile = cluster.getBlockFile(i, block);
if(blockFile != null && blockFile.exists()) {
assertTrue(blockFile.delete());
}
}
      // We expect the filesystem to be corrupted
outStr = runFsck(conf, 1, false, "/");
while (!outStr.contains(NamenodeFsck.CORRUPT_STATUS)) {
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
}
outStr = runFsck(conf, 1, false, "/");
}
// After a fsck -move, the corrupted file should still exist.
for (int i = 0; i < MAX_MOVE_TRIES; i++) {
outStr = runFsck(conf, 1, true, "/", "-move" );
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
String[] newFileNames = util.getFileNames(topDir);
boolean found = false;
for (String f : newFileNames) {
if (f.equals(corruptFileName)) {
found = true;
break;
}
}
assertTrue(found);
}
// Fix the filesystem by moving corrupted files to lost+found
outStr = runFsck(conf, 1, true, "/", "-move", "-delete");
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
// Check to make sure we have healthy filesystem
outStr = runFsck(conf, 0, true, "/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
util.cleanup(fs, topDir);
if (fs != null) {try{fs.close();} catch(Exception e){}}
cluster.shutdown();
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testFsckOpenFiles() throws Exception {
DFSTestUtil util = new DFSTestUtil.Builder().setName("TestFsck").
setNumFiles(4).build();
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
String topDir = "/srcdat";
String randomString = "HADOOP ";
fs = cluster.getFileSystem();
cluster.waitActive();
util.createFiles(fs, topDir);
util.waitReplication(fs, topDir, (short)3);
String outStr = runFsck(conf, 0, true, "/");
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// Open a file for writing and do not close for now
Path openFile = new Path(topDir + "/openFile");
FSDataOutputStream out = fs.create(openFile);
int writeCount = 0;
while (writeCount != 100) {
out.write(randomString.getBytes());
writeCount++;
}
      // We expect the filesystem to be HEALTHY; the open file is not
      // flagged without the -openforwrite option
outStr = runFsck(conf, 0, true, topDir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertFalse(outStr.contains("OPENFORWRITE"));
// Use -openforwrite option to list open files
outStr = runFsck(conf, 0, true, topDir, "-openforwrite");
System.out.println(outStr);
assertTrue(outStr.contains("OPENFORWRITE"));
assertTrue(outStr.contains("openFile"));
// Close the file
out.close();
// Now, fsck should show HEALTHY fs and should not show any open files
outStr = runFsck(conf, 0, true, topDir);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertFalse(outStr.contains("OPENFORWRITE"));
util.cleanup(fs, topDir);
if (fs != null) {try{fs.close();} catch(Exception e){}}
cluster.shutdown();
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
@Test
public void testCorruptBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;
int replicaCount = 0;
Random random = new Random();
String outStr = null;
short factor = 1;
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path file1 = new Path("/testCorruptBlock");
DFSTestUtil.createFile(fs, file1, 1024, factor, 0);
// Wait until file replication has completed
DFSTestUtil.waitReplication(fs, file1, factor);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
// Make sure filesystem is in healthy state
outStr = runFsck(conf, 0, true, "/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// corrupt replicas
File blockFile = cluster.getBlockFile(0, block);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel();
String badString = "BADBAD";
int rand = random.nextInt((int) channel.size()/2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
}
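      // Overwriting bytes in the middle of the replica leaves the block
      // length unchanged but invalidates its checksum, so the corruption is
      // only detected once a reader verifies the block.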
// Read the file to trigger reportBadBlocks
try {
IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
true);
} catch (IOException ie) {
// Ignore exception
}
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
blocks = dfsClient.getNamenode().
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
replicaCount = blocks.get(0).getLocations().length;
while (replicaCount != factor) {
try {
Thread.sleep(100);
} catch (InterruptedException ignore) {
}
blocks = dfsClient.getNamenode().
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
replicaCount = blocks.get(0).getLocations().length;
}
assertTrue (blocks.get(0).isCorrupt());
// Check if fsck reports the same
outStr = runFsck(conf, 1, true, "/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
assertTrue(outStr.contains("testCorruptBlock"));
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testUnderMinReplicatedBlock() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
// Set minReplication to 2
short minReplication=2;
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY,minReplication);
FileSystem fs = null;
DFSClient dfsClient = null;
LocatedBlocks blocks = null;
int replicaCount = 0;
Random random = new Random();
String outStr = null;
short factor = 1;
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
cluster.waitActive();
fs = cluster.getFileSystem();
Path file1 = new Path("/testUnderMinReplicatedBlock");
DFSTestUtil.createFile(fs, file1, 1024, minReplication, 0);
// Wait until file replication has completed
DFSTestUtil.waitReplication(fs, file1, minReplication);
ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, file1);
// Make sure filesystem is in healthy state
outStr = runFsck(conf, 0, true, "/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// corrupt the first replica
File blockFile = cluster.getBlockFile(0, block);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel();
String badString = "BADBAD";
int rand = random.nextInt((int) channel.size()/2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
}
dfsClient = new DFSClient(new InetSocketAddress("localhost",
cluster.getNameNodePort()), conf);
blocks = dfsClient.getNamenode().
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
replicaCount = blocks.get(0).getLocations().length;
while (replicaCount != factor) {
try {
Thread.sleep(100);
// Read the file to trigger reportBadBlocks
try {
IOUtils.copyBytes(fs.open(file1), new IOUtils.NullOutputStream(), conf,
true);
} catch (IOException ie) {
// Ignore exception
}
System.out.println("sleep in try: replicaCount="+replicaCount+" factor="+factor);
} catch (InterruptedException ignore) {
}
blocks = dfsClient.getNamenode().
getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
replicaCount = blocks.get(0).getLocations().length;
}
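      // At this point only one live replica remains, which is below
      // dfs.namenode.replication.min=2, so fsck should flag the block as
      // under min-replicated while still reporting a HEALTHY filesystem.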
// Check if fsck reports the same
outStr = runFsck(conf, 0, true, "/");
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertTrue(outStr.contains("UNDER MIN REPL'D BLOCKS:\t1 (100.0 %)"));
assertTrue(outStr.contains("dfs.namenode.replication.min:\t2"));
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
@Test(timeout = 60000)
public void testFsckReplicaDetails() throws Exception {
final short REPL_FACTOR = 1;
short NUM_DN = 1;
final long blockSize = 512;
final long fileSize = 1024;
boolean checkDecommissionInProgress = false;
String[] racks = { "/rack1" };
String[] hosts = { "host1" };
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
MiniDFSCluster cluster;
DistributedFileSystem dfs;
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts).racks(racks).build();
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
// create files
    final String testFile = "/testfile";
final Path path = new Path(testFile);
DFSTestUtil.createFile(dfs, path, fileSize, REPL_FACTOR, 1000L);
DFSTestUtil.waitReplication(dfs, path, REPL_FACTOR);
try {
// make sure datanode that has replica is fine before decommission
String fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
assertTrue(fsckOut.contains(NamenodeFsck.HEALTHY_STATUS));
assertTrue(fsckOut.contains("(LIVE)"));
// decommission datanode
ExtendedBlock eb = DFSTestUtil.getFirstBlock(dfs, path);
DatanodeDescriptor dn =
cluster.getNameNode().getNamesystem().getBlockManager()
.getBlockCollection(eb.getLocalBlock()).getBlocks()[0].getDatanode(0);
cluster.getNameNode().getNamesystem().getBlockManager().getDatanodeManager()
.getDecomManager().startDecommission(dn);
String dnName = dn.getXferAddr();
// check the replica status while decommissioning
fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
assertTrue(fsckOut.contains("(DECOMMISSIONING)"));
// Start 2nd Datanode and wait for decommission to start
cluster.startDataNodes(conf, 1, true, null, null, null);
DatanodeInfo datanodeInfo = null;
do {
Thread.sleep(2000);
for (DatanodeInfo info : dfs.getDataNodeStats()) {
if (dnName.equals(info.getXferAddr())) {
datanodeInfo = info;
}
}
if (!checkDecommissionInProgress && datanodeInfo != null
&& datanodeInfo.isDecommissionInProgress()) {
checkDecommissionInProgress = true;
}
} while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
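      // Decommission cannot complete while the decommissioning node holds
      // the only copy of the block; the second datanode started above gives
      // the namenode somewhere to re-replicate, letting the loop finish.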
// check the replica status after decommission is done
fsckOut = runFsck(conf, 0, true, testFile, "-files", "-blocks", "-replicaDetails");
assertTrue(fsckOut.contains("(DECOMMISSIONED)"));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/** Test if fsck can return -1 in case of failure
*
* @throws Exception
*/
@Test
public void testFsckError() throws Exception {
MiniDFSCluster cluster = null;
try {
// bring up a one-node cluster
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
String fileName = "/test.txt";
Path filePath = new Path(fileName);
FileSystem fs = cluster.getFileSystem();
// create a one-block file
DFSTestUtil.createFile(fs, filePath, 1L, (short)1, 1L);
DFSTestUtil.waitReplication(fs, filePath, (short)1);
// intentionally corrupt NN data structure
INodeFile node = (INodeFile) cluster.getNamesystem().dir.getINode
(fileName, true);
final BlockInfo[] blocks = node.getBlocks();
assertEquals(blocks.length, 1);
blocks[0].setNumBytes(-1L); // set the block length to be negative
// run fsck and expect a failure with -1 as the error code
String outStr = runFsck(conf, -1, true, fileName);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.FAILURE_STATUS));
// clean up file system
fs.delete(filePath, true);
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
  /** Check that the -list-corruptfileblocks option of the fsck command works properly */
@Test
public void testFsckListCorruptFilesBlocks() throws Exception {
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
conf.setInt(DFSConfigKeys.DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY, 1);
FileSystem fs = null;
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fs = cluster.getFileSystem();
DFSTestUtil util = new DFSTestUtil.Builder().
setName("testGetCorruptFiles").setNumFiles(3).setMaxLevels(1).
setMaxSize(1024).build();
util.createFiles(fs, "/corruptData", (short) 1);
util.waitReplication(fs, "/corruptData", (short) 1);
String outStr = runFsck(conf, 0, false, "/corruptData", "-list-corruptfileblocks");
System.out.println("1. good fsck out: " + outStr);
assertTrue(outStr.contains("has 0 CORRUPT files"));
// delete the blocks
final String bpid = cluster.getNamesystem().getBlockPoolId();
for (int i=0; i<4; i++) {
for (int j=0; j<=1; j++) {
File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
List<File> metadataFiles = MiniDFSCluster.getAllBlockMetadataFiles(
data_dir);
if (metadataFiles == null)
continue;
for (File metadataFile : metadataFiles) {
File blockFile = Block.metaToBlockFile(metadataFile);
assertTrue("Cannot remove file.", blockFile.delete());
assertTrue("Cannot remove file.", metadataFile.delete());
}
}
}
// wait for the namenode to see the corruption
final NamenodeProtocols namenode = cluster.getNameNodeRpc();
CorruptFileBlocks corruptFileBlocks = namenode
.listCorruptFileBlocks("/corruptData", null);
int numCorrupt = corruptFileBlocks.getFiles().length;
while (numCorrupt == 0) {
Thread.sleep(1000);
corruptFileBlocks = namenode
.listCorruptFileBlocks("/corruptData", null);
numCorrupt = corruptFileBlocks.getFiles().length;
}
outStr = runFsck(conf, -1, true, "/corruptData", "-list-corruptfileblocks");
System.out.println("2. bad fsck out: " + outStr);
assertTrue(outStr.contains("has 3 CORRUPT files"));
// Do a listing on a dir which doesn't have any corrupt blocks and validate
util.createFiles(fs, "/goodData");
outStr = runFsck(conf, 0, true, "/goodData", "-list-corruptfileblocks");
System.out.println("3. good fsck out: " + outStr);
assertTrue(outStr.contains("has 0 CORRUPT files"));
util.cleanup(fs,"/corruptData");
util.cleanup(fs, "/goodData");
} finally {
if (cluster != null) {cluster.shutdown();}
}
}
/**
   * Test that the fsck command prints the proper usage when passed illegal
   * arguments.
*/
@Test
public void testToCheckTheFsckCommandOnIllegalArguments() throws Exception {
MiniDFSCluster cluster = null;
try {
// bring up a one-node cluster
Configuration conf = new HdfsConfiguration();
cluster = new MiniDFSCluster.Builder(conf).build();
String fileName = "/test.txt";
Path filePath = new Path(fileName);
FileSystem fs = cluster.getFileSystem();
// create a one-block file
DFSTestUtil.createFile(fs, filePath, 1L, (short) 1, 1L);
DFSTestUtil.waitReplication(fs, filePath, (short) 1);
// passing illegal option
String outStr = runFsck(conf, -1, true, fileName, "-thisIsNotAValidFlag");
System.out.println(outStr);
assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
      // passing multiple paths as arguments
outStr = runFsck(conf, -1, true, "/", fileName);
System.out.println(outStr);
assertTrue(!outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// clean up file system
fs.delete(filePath, true);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Tests that the # of missing block replicas and expected replicas is correct
* @throws IOException
*/
@Test
public void testFsckMissingReplicas() throws IOException {
// Desired replication factor
// Set this higher than NUM_REPLICAS so it's under-replicated
final short REPL_FACTOR = 2;
// Number of replicas to actually start
final short NUM_REPLICAS = 1;
// Number of blocks to write
final short NUM_BLOCKS = 3;
// Set a small-ish blocksize
final long blockSize = 512;
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
try {
// Startup a minicluster
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_REPLICAS).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
// Create a file that will be intentionally under-replicated
      final String pathString = "/testfile";
final Path path = new Path(pathString);
long fileLen = blockSize * NUM_BLOCKS;
DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
// Create an under-replicated file
NameNode namenode = cluster.getNameNode();
NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
.getDatanodeManager().getNetworkTopology();
Map<String,String[]> pmap = new HashMap<String, String[]>();
Writer result = new StringWriter();
PrintWriter out = new PrintWriter(result, true);
InetAddress remoteAddress = InetAddress.getLocalHost();
NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
NUM_REPLICAS, remoteAddress);
// Run the fsck and check the Result
final HdfsFileStatus file =
namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result res = new Result(conf);
fsck.check(pathString, file, res);
// Also print the output from the fsck, for ex post facto sanity checks
System.out.println(result.toString());
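      // 3 blocks x desired replication 2 = 6 expected replicas, but with a
      // single datanode only one replica per block exists, leaving 3 missing.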
assertEquals(res.missingReplicas,
(NUM_BLOCKS*REPL_FACTOR) - (NUM_BLOCKS*NUM_REPLICAS));
assertEquals(res.numExpectedReplicas, NUM_BLOCKS*REPL_FACTOR);
} finally {
if(dfs != null) {
dfs.close();
}
if(cluster != null) {
cluster.shutdown();
}
}
}
/**
   * Tests that the # of mis-replicated block replicas is correct
* @throws IOException
*/
@Test
public void testFsckMisPlacedReplicas() throws IOException {
// Desired replication factor
final short REPL_FACTOR = 2;
// Number of replicas to actually start
short NUM_DN = 2;
// Number of blocks to write
final short NUM_BLOCKS = 3;
// Set a small-ish blocksize
final long blockSize = 512;
String [] racks = {"/rack1", "/rack1"};
String [] hosts = {"host1", "host2"};
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
try {
// Startup a minicluster
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
.racks(racks).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
// Create a file that will be intentionally under-replicated
      final String pathString = "/testfile";
final Path path = new Path(pathString);
long fileLen = blockSize * NUM_BLOCKS;
DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
// Create an under-replicated file
NameNode namenode = cluster.getNameNode();
NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
.getDatanodeManager().getNetworkTopology();
// Add a new node on different rack, so previous blocks' replicas
// are considered to be misplaced
nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
NUM_DN++;
Map<String,String[]> pmap = new HashMap<String, String[]>();
Writer result = new StringWriter();
PrintWriter out = new PrintWriter(result, true);
InetAddress remoteAddress = InetAddress.getLocalHost();
NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
NUM_DN, remoteAddress);
// Run the fsck and check the Result
final HdfsFileStatus file =
namenode.getRpcServer().getFileInfo(pathString);
assertNotNull(file);
Result res = new Result(conf);
fsck.check(pathString, file, res);
// check misReplicatedBlock number.
assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
} finally {
if(dfs != null) {
dfs.close();
}
if(cluster != null) {
cluster.shutdown();
}
}
}
/** Test fsck with FileNotFound */
@Test
public void testFsckFileNotFound() throws Exception {
// Number of replicas to actually start
final short NUM_REPLICAS = 1;
Configuration conf = new Configuration();
NameNode namenode = mock(NameNode.class);
NetworkTopology nettop = mock(NetworkTopology.class);
Map<String,String[]> pmap = new HashMap<>();
Writer result = new StringWriter();
PrintWriter out = new PrintWriter(result, true);
InetAddress remoteAddress = InetAddress.getLocalHost();
FSNamesystem fsName = mock(FSNamesystem.class);
FSDirectory fsd = mock(FSDirectory.class);
BlockManager blockManager = mock(BlockManager.class);
DatanodeManager dnManager = mock(DatanodeManager.class);
INodesInPath iip = mock(INodesInPath.class);
when(namenode.getNamesystem()).thenReturn(fsName);
when(fsName.getBlockManager()).thenReturn(blockManager);
when(fsName.getFSDirectory()).thenReturn(fsd);
when(fsd.getFSNamesystem()).thenReturn(fsName);
when(fsd.getINodesInPath(anyString(), anyBoolean())).thenReturn(iip);
when(blockManager.getDatanodeManager()).thenReturn(dnManager);
NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
NUM_REPLICAS, remoteAddress);
String pathString = "/tmp/testFile";
long length = 123L;
boolean isDir = false;
int blockReplication = 1;
long blockSize = 128 *1024L;
long modTime = 123123123L;
long accessTime = 123123120L;
FsPermission perms = FsPermission.getDefault();
String owner = "foo";
String group = "bar";
byte [] symlink = null;
byte [] path = DFSUtil.string2Bytes(pathString);
long fileId = 312321L;
int numChildren = 1;
byte storagePolicy = 0;
HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
blockSize, modTime, accessTime, perms, owner, group, symlink, path,
fileId, numChildren, null, storagePolicy);
Result res = new Result(conf);
try {
fsck.check(pathString, file, res);
} catch (Exception e) {
fail("Unexpected exception " + e.getMessage());
}
assertTrue(res.toString().contains("HEALTHY"));
}
/** Test fsck with symlinks in the filesystem */
@Test
public void testFsckSymlink() throws Exception {
final DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(1).build();
final Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 10000L);
MiniDFSCluster cluster = null;
FileSystem fs = null;
try {
final long precision = 1L;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY, precision);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
fs = cluster.getFileSystem();
final String fileName = "/srcdat";
util.createFiles(fs, fileName);
final FileContext fc = FileContext.getFileContext(
cluster.getConfiguration(0));
final Path file = new Path(fileName);
final Path symlink = new Path("/srcdat-symlink");
fc.createSymlink(file, symlink, false);
util.waitReplication(fs, fileName, (short)3);
long aTime = fc.getFileStatus(symlink).getAccessTime();
Thread.sleep(precision);
setupAuditLogs();
String outStr = runFsck(conf, 0, true, "/");
verifyAuditLogs();
assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
assertTrue(outStr.contains("Total symlinks:\t\t1"));
util.cleanup(fs, fileName);
} finally {
if (fs != null) {try{fs.close();} catch(Exception e){}}
if (cluster != null) { cluster.shutdown(); }
}
}
/**
   * Test that snapshot files are included in the fsck report when requested
*/
@Test
public void testFsckForSnapshotFiles() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.build();
try {
String runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots",
"-files");
assertTrue(runFsck.contains("HEALTHY"));
final String fileName = "/srcdat";
DistributedFileSystem hdfs = cluster.getFileSystem();
Path file1 = new Path(fileName);
DFSTestUtil.createFile(hdfs, file1, 1024, (short) 1, 1000L);
hdfs.allowSnapshot(new Path("/"));
hdfs.createSnapshot(new Path("/"), "mySnapShot");
runFsck = runFsck(conf, 0, true, "/", "-includeSnapshots", "-files");
assertTrue(runFsck.contains("/.snapshot/mySnapShot/srcdat"));
runFsck = runFsck(conf, 0, true, "/", "-files");
assertFalse(runFsck.contains("mySnapShot"));
} finally {
cluster.shutdown();
}
}
/**
* Test for blockIdCK
*/
@Test
public void testBlockIdCK() throws Exception {
final short REPL_FACTOR = 2;
short NUM_DN = 2;
final long blockSize = 512;
String [] racks = {"/rack1", "/rack2"};
String [] hosts = {"host1", "host2"};
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
.racks(racks).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(1).build();
//create files
    final String pathString = "/testfile";
final Path path = new Path(pathString);
util.createFile(dfs, path, 1024, REPL_FACTOR , 1000L);
util.waitReplication(dfs, path, REPL_FACTOR);
StringBuilder sb = new StringBuilder();
for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
}
String[] bIds = sb.toString().split(" ");
//run fsck
try {
//illegal input test
String runFsckResult = runFsck(conf, 0, true, "/", "-blockId",
"not_a_block_id");
assertTrue(runFsckResult.contains("Incorrect blockId format:"));
//general test
runFsckResult = runFsck(conf, 0, true, "/", "-blockId", sb.toString());
assertTrue(runFsckResult.contains(bIds[0]));
assertTrue(runFsckResult.contains(bIds[1]));
assertTrue(runFsckResult.contains(
"Block replica on datanode/rack: host1/rack1 is HEALTHY"));
assertTrue(runFsckResult.contains(
"Block replica on datanode/rack: host2/rack2 is HEALTHY"));
} finally {
cluster.shutdown();
}
}
/**
* Test for blockIdCK with datanode decommission
*/
@Test
public void testBlockIdCKDecommission() throws Exception {
final short REPL_FACTOR = 1;
short NUM_DN = 2;
final long blockSize = 512;
boolean checkDecommissionInProgress = false;
String [] racks = {"/rack1", "/rack2"};
String [] hosts = {"host1", "host2"};
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 2);
MiniDFSCluster cluster;
    DistributedFileSystem dfs;
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
.racks(racks).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(1).build();
//create files
    final String pathString = "/testfile";
final Path path = new Path(pathString);
util.createFile(dfs, path, 1024, REPL_FACTOR, 1000L);
util.waitReplication(dfs, path, REPL_FACTOR);
StringBuilder sb = new StringBuilder();
for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
}
String[] bIds = sb.toString().split(" ");
try {
//make sure datanode that has replica is fine before decommission
String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
//decommission datanode
ExtendedBlock eb = util.getFirstBlock(dfs, path);
DatanodeDescriptor dn = cluster.getNameNode().getNamesystem()
.getBlockManager().getBlockCollection(eb.getLocalBlock())
.getBlocks()[0].getDatanode(0);
cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDecomManager().startDecommission(dn);
String dnName = dn.getXferAddr();
//wait for decommission start
DatanodeInfo datanodeInfo = null;
int count = 0;
do {
Thread.sleep(2000);
for (DatanodeInfo info : dfs.getDataNodeStats()) {
if (dnName.equals(info.getXferAddr())) {
datanodeInfo = info;
}
}
//check decommissioning only once
if(!checkDecommissionInProgress && datanodeInfo != null
&& datanodeInfo.isDecommissionInProgress()) {
String fsckOut = runFsck(conf, 3, true, "/", "-blockId", bIds[0]);
assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONING_STATUS));
checkDecommissionInProgress = true;
}
} while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
//check decommissioned
String fsckOut = runFsck(conf, 2, true, "/", "-blockId", bIds[0]);
assertTrue(fsckOut.contains(NamenodeFsck.DECOMMISSIONED_STATUS));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test for blockIdCK with block corruption
*/
@Test
public void testBlockIdCKCorruption() throws Exception {
short NUM_DN = 1;
final long blockSize = 512;
Random random = new Random();
ExtendedBlock block;
short repFactor = 1;
String [] racks = {"/rack1"};
String [] hosts = {"host1"};
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000);
// Set short retry timeouts so this test runs faster
conf.setInt(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 10);
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
MiniDFSCluster cluster = null;
DistributedFileSystem dfs = null;
try {
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
.racks(racks).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(1).build();
//create files
      final String pathString = "/testfile";
final Path path = new Path(pathString);
util.createFile(dfs, path, 1024, repFactor, 1000L);
util.waitReplication(dfs, path, repFactor);
StringBuilder sb = new StringBuilder();
for (LocatedBlock lb: util.getAllBlocks(dfs, path)){
sb.append(lb.getBlock().getLocalBlock().getBlockName()+" ");
}
String[] bIds = sb.toString().split(" ");
//make sure block is healthy before we corrupt it
String outStr = runFsck(conf, 0, true, "/", "-blockId", bIds[0]);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// corrupt replicas
block = DFSTestUtil.getFirstBlock(dfs, path);
File blockFile = cluster.getBlockFile(0, block);
if (blockFile != null && blockFile.exists()) {
RandomAccessFile raFile = new RandomAccessFile(blockFile, "rw");
FileChannel channel = raFile.getChannel();
String badString = "BADBAD";
int rand = random.nextInt((int) channel.size()/2);
raFile.seek(rand);
raFile.write(badString.getBytes());
raFile.close();
}
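// overwriting bytes in the middle of the block file invalidates its
// stored checksums, so the replica is reported corrupt on the next scan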
util.waitCorruptReplicas(dfs, cluster.getNamesystem(), path, block, 1);
outStr = runFsck(conf, 1, false, "/", "-blockId", block.getBlockName());
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
private void writeFile(final DistributedFileSystem dfs,
Path dir, String fileName) throws IOException {
Path filePath = new Path(dir, fileName);
final FSDataOutputStream out = dfs.create(filePath);
out.writeChars("teststring");
out.close();
}
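// Convenience overload: create the directory, apply the given storage
// policy to it, then write a small file underneath it.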
private void writeFile(final DistributedFileSystem dfs,
String dirName, String fileName, String storagePolicy) throws IOException {
Path dirPath = new Path(dirName);
dfs.mkdirs(dirPath);
dfs.setStoragePolicy(dirPath, storagePolicy);
writeFile(dfs, dirPath, fileName);
}
/**
* Test storage policy display
*/
@Test
public void testStoragePoliciesCK() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(3)
.storageTypes(
new StorageType[] {StorageType.DISK, StorageType.ARCHIVE})
.build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
writeFile(dfs, "/testhot", "file", "HOT");
writeFile(dfs, "/testwarm", "file", "WARM");
writeFile(dfs, "/testcold", "file", "COLD");
String outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
assertTrue(outStr.contains("DISK:3(HOT)"));
assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
assertTrue(outStr.contains("All blocks satisfy specified storage policy."));
dfs.setStoragePolicy(new Path("/testhot"), "COLD");
dfs.setStoragePolicy(new Path("/testwarm"), "COLD");
outStr = runFsck(conf, 0, true, "/", "-storagepolicies");
assertTrue(outStr.contains("DISK:3(HOT)"));
assertTrue(outStr.contains("DISK:1,ARCHIVE:2(WARM)"));
assertTrue(outStr.contains("ARCHIVE:3(COLD)"));
assertFalse(outStr.contains("All blocks satisfy specified storage policy."));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test for blocks on decommissioning hosts are not shown as missing
*/
@Test
public void testFsckWithDecommissionedReplicas() throws Exception {
final short REPL_FACTOR = 1;
short NUM_DN = 2;
final long blockSize = 512;
final long fileSize = 1024;
boolean checkDecommissionInProgress = false;
String [] racks = {"/rack1", "/rack2"};
String [] hosts = {"host1", "host2"};
Configuration conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);
MiniDFSCluster cluster;
DistributedFileSystem dfs;
cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
.racks(racks).build();
assertNotNull("Failed Cluster Creation", cluster);
cluster.waitClusterUp();
dfs = cluster.getFileSystem();
assertNotNull("Failed to get FileSystem", dfs);
DFSTestUtil util = new DFSTestUtil.Builder().
setName(getClass().getSimpleName()).setNumFiles(1).build();
// create a test file
final String testFile = "/testfile";
final Path path = new Path(testFile);
util.createFile(dfs, path, fileSize, REPL_FACTOR, 1000L);
util.waitReplication(dfs, path, REPL_FACTOR);
try {
// make sure datanode that has replica is fine before decommission
String outStr = runFsck(conf, 0, true, testFile);
System.out.println(outStr);
assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS));
// decommission datanode
ExtendedBlock eb = util.getFirstBlock(dfs, path);
DatanodeDescriptor dn = cluster.getNameNode().getNamesystem()
.getBlockManager().getBlockCollection(eb.getLocalBlock())
.getBlocks()[0].getDatanode(0);
cluster.getNameNode().getNamesystem().getBlockManager()
.getDatanodeManager().getDecomManager().startDecommission(dn);
String dnName = dn.getXferAddr();
// wait for decommission start
DatanodeInfo datanodeInfo = null;
do {
Thread.sleep(2000);
for (DatanodeInfo info : dfs.getDataNodeStats()) {
if (dnName.equals(info.getXferAddr())) {
datanodeInfo = info;
}
}
// while decommissioning is in progress, the replica should still be
// reported as healthy (exit code 0), not corrupt (exit code 1)
if (!checkDecommissionInProgress && datanodeInfo != null
&& datanodeInfo.isDecommissionInProgress()) {
runFsck(conf, 0, true, testFile);
checkDecommissionInProgress = true;
}
} while (datanodeInfo != null && !datanodeInfo.isDecommissioned());
// after decommission completes, fsck should still report the replica
// as healthy (exit code 0)
runFsck(conf, 0, true, testFile);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 61,987
| 37.217016
| 96
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAclConfigFlag.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests that the configuration flag that controls support for ACLs is off by
* default and causes all attempted operations related to ACLs to fail. The
* NameNode can still load ACLs from fsimage or edits.
*/
public class TestAclConfigFlag {
private static final Path PATH = new Path("/path");
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
@Rule
public ExpectedException exception = ExpectedException.none();
@After
public void shutdown() throws Exception {
IOUtils.cleanup(null, fs);
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testModifyAclEntries() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.modifyAclEntries(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveAclEntries() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeAclEntries(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testRemoveDefaultAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeDefaultAcl(PATH);
}
@Test
public void testRemoveAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.removeAcl(PATH);
}
@Test
public void testSetAcl() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
}
@Test
public void testGetAclStatus() throws Exception {
initCluster(true, false);
fs.mkdirs(PATH);
expectException();
fs.getAclStatus(PATH);
}
@Test
public void testEditLog() throws Exception {
// With ACLs enabled, set an ACL.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
// Restart with ACLs disabled. Expect successful restart.
restart(false, false);
}
@Test
public void testFsImage() throws Exception {
// With ACLs enabled, set an ACL.
initCluster(true, true);
fs.mkdirs(PATH);
fs.setAcl(PATH, Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_WRITE)));
// Save a new checkpoint and restart with ACLs still enabled.
restart(true, true);
// Restart with ACLs disabled. Expect successful restart.
restart(false, false);
}
/**
* We expect an AclException, and we want the exception text to state the
* configuration key that controls ACL support.
*/
private void expectException() {
exception.expect(AclException.class);
exception.expectMessage(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY);
}
/**
* Initialize the cluster, wait for it to become active, and get FileSystem.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if any step fails
*/
private void initCluster(boolean format, boolean aclsEnabled)
throws Exception {
Configuration conf = new Configuration();
// not explicitly setting to false, should be false by default
if (aclsEnabled) {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
}
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
fs = cluster.getFileSystem();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @param aclsEnabled if true, ACL support is enabled
* @throws Exception if restart fails
*/
private void restart(boolean checkpoint, boolean aclsEnabled)
throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false, aclsEnabled);
}
}
| 5,962
| 30.384211
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogAutoroll.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.net.BindException;
import java.util.Random;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeEditLogRoller;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Supplier;
public class TestEditLogAutoroll {
private Configuration conf;
private MiniDFSCluster cluster;
private NameNode nn0;
private FileSystem fs;
private FSEditLog editLog;
private final Random random = new Random();
private static final Log LOG = LogFactory.getLog(TestEditLogAutoroll.class);
@Before
public void setUp() throws Exception {
conf = new Configuration();
// Stall the standby checkpointer in two ways
conf.setLong(DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, Long.MAX_VALUE);
conf.setLong(DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 20);
// Make it autoroll after 10 edits
conf.setFloat(DFS_NAMENODE_EDIT_LOG_AUTOROLL_MULTIPLIER_THRESHOLD, 0.5f);
conf.setInt(DFS_NAMENODE_EDIT_LOG_AUTOROLL_CHECK_INTERVAL_MS, 100);
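// (roll threshold = multiplier * checkpoint txns = 0.5 * 20 = 10 edits;
// the 100ms check interval keeps the test fast)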
int retryCount = 0;
while (true) {
try {
int basePort = 10060 + random.nextInt(100) * 2;
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(topology)
.numDataNodes(0)
.build();
cluster.waitActive();
nn0 = cluster.getNameNode(0);
fs = HATestUtil.configureFailoverFs(cluster, conf);
cluster.transitionToActive(0);
fs = cluster.getFileSystem(0);
editLog = nn0.getNamesystem().getEditLog();
break;
} catch (BindException e) {
++retryCount;
LOG.info("Set up MiniDFSCluster failed due to port conflicts, retry "
+ retryCount + " times");
}
}
}
@After
public void tearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testEditLogAutoroll() throws Exception {
// Make some edits
final long startTxId = editLog.getCurSegmentTxId();
for (int i = 0; i < 11; i++) {
fs.mkdirs(new Path("testEditLogAutoroll-" + i));
}
// Wait for the NN to autoroll
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
return editLog.getCurSegmentTxId() > startTxId;
}
}, 1000, 5000);
// Transition to standby and make sure the roller stopped
nn0.transitionToStandby();
GenericTestUtils.assertNoThreadsMatching(
".*" + NameNodeEditLogRoller.class.getSimpleName() + ".*");
}
}
| 4,495
| 34.68254
| 103
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.assertNotNull;
import java.lang.management.ManagementFactory;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;
import javax.management.MBeanAttributeInfo;
import javax.management.MBeanInfo;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
import org.mortbay.util.ajax.JSON;
/**
* Class for testing {@link NameNodeMXBean} implementation
*/
public class TestFSNamesystemMBean {
/**
* MBeanClient tries to access FSNamesystem/FSNamesystemState/NameNodeInfo
* JMX properties. If it can access all the properties, the test is
* considered successful.
*/
private static class MBeanClient extends Thread {
private boolean succeeded = false;
@Override
public void run() {
try {
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
// Metrics that belong to "FSNamesystem", these are metrics that
// come from hadoop metrics framework for the class FSNamesystem.
ObjectName mxbeanNamefsn = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystem");
// Metrics that belong to "FSNamesystemState".
// These are metrics that FSNamesystem registers directly with MBeanServer.
ObjectName mxbeanNameFsns = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
// Metrics that belong to "NameNodeInfo".
// These are metrics that FSNamesystem registers directly with MBeanServer.
ObjectName mxbeanNameNni = new ObjectName(
"Hadoop:service=NameNode,name=NameNodeInfo");
final Set<ObjectName> mbeans = new HashSet<ObjectName>();
mbeans.add(mxbeanNamefsn);
mbeans.add(mxbeanNameFsns);
mbeans.add(mxbeanNameNni);
for(ObjectName mbean : mbeans) {
MBeanInfo attributes = mbs.getMBeanInfo(mbean);
for (MBeanAttributeInfo attributeInfo : attributes.getAttributes()) {
mbs.getAttribute(mbean, attributeInfo.getName());
}
}
succeeded = true;
} catch (Exception e) {
// ignored: leave succeeded == false; the test asserts on it
}
}
}
@Test
public void test() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNameNode().namesystem;
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
String snapshotStats = (String) (mbs.getAttribute(mxbeanName,
"SnapshotStats"));
@SuppressWarnings("unchecked")
Map<String, Object> stat = (Map<String, Object>) JSON
.parse(snapshotStats);
assertTrue(stat.containsKey("SnapshottableDirectories")
&& (Long) stat.get("SnapshottableDirectories") == fsn
.getNumSnapshottableDirs());
assertTrue(stat.containsKey("Snapshots")
&& (Long) stat.get("Snapshots") == fsn.getNumSnapshots());
Object pendingDeletionBlocks = mbs.getAttribute(mxbeanName,
"PendingDeletionBlocks");
assertNotNull(pendingDeletionBlocks);
assertTrue(pendingDeletionBlocks instanceof Long);
Object encryptionZones = mbs.getAttribute(mxbeanName,
"NumEncryptionZones");
assertNotNull(encryptionZones);
assertTrue(encryptionZones instanceof Integer);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
// The test makes sure JMX request can be processed even if namesystem's
// writeLock is owned by another thread.
@Test
public void testWithFSNamesystemWriteLock() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
FSNamesystem fsn = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
fsn = cluster.getNameNode().namesystem;
fsn.writeLock();
MBeanClient client = new MBeanClient();
client.start();
client.join(20000);
assertTrue("JMX calls are blocked when FSNamesystem's writerlock" +
"is owned by another thread", client.succeeded);
client.interrupt();
} finally {
if (fsn != null && fsn.hasWriteLock()) {
fsn.writeUnlock();
}
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 5,492
| 32.907407
| 83
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.FSLimitException.MaxDirectoryItemsExceededException;
import org.apache.hadoop.hdfs.protocol.FSLimitException.PathComponentTooLongException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Before;
import org.junit.Test;
public class TestFsLimits {
static Configuration conf;
static FSNamesystem fs;
static boolean fsIsReady;
static final PermissionStatus perms
= new PermissionStatus("admin", "admin", FsPermission.getDefault());
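/**
* Builds an FSNamesystem backed by a mocked FSImage/FSEditLog so the
* fs-limit checks can be exercised without starting a real cluster.
*/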
private static FSNamesystem getMockNamesystem() throws IOException {
FSImage fsImage = mock(FSImage.class);
FSEditLog editLog = mock(FSEditLog.class);
doReturn(editLog).when(fsImage).getEditLog();
FSNamesystem fsn = new FSNamesystem(conf, fsImage);
fsn.setImageLoaded(fsIsReady);
return fsn;
}
@Before
public void setUp() throws IOException {
conf = new Configuration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
fileAsURI(new File(MiniDFSCluster.getBaseDirectory(),
"namenode")).toString());
NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
fs = null;
fsIsReady = true;
}
@Test
public void testNoLimits() throws Exception {
mkdirs("/1", null);
mkdirs("/22", null);
mkdirs("/333", null);
mkdirs("/4444", null);
mkdirs("/55555", null);
mkdirs("/1/" + HdfsConstants.DOT_SNAPSHOT_DIR,
HadoopIllegalArgumentException.class);
}
@Test
public void testMaxComponentLength() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 2);
mkdirs("/1", null);
mkdirs("/22", null);
mkdirs("/333", PathComponentTooLongException.class);
mkdirs("/4444", PathComponentTooLongException.class);
}
@Test
public void testMaxComponentLengthRename() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 2);
mkdirs("/5", null);
rename("/5", "/555", PathComponentTooLongException.class);
rename("/5", "/55", null);
mkdirs("/6", null);
deprecatedRename("/6", "/666", PathComponentTooLongException.class);
deprecatedRename("/6", "/66", null);
}
@Test
public void testMaxDirItems() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
mkdirs("/1", null);
mkdirs("/22", null);
mkdirs("/333", MaxDirectoryItemsExceededException.class);
mkdirs("/4444", MaxDirectoryItemsExceededException.class);
}
@Test
public void testMaxDirItemsRename() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
mkdirs("/1", null);
mkdirs("/2", null);
mkdirs("/2/A", null);
rename("/2/A", "/A", MaxDirectoryItemsExceededException.class);
rename("/2/A", "/1/A", null);
mkdirs("/2/B", null);
deprecatedRename("/2/B", "/B", MaxDirectoryItemsExceededException.class);
deprecatedRename("/2/B", "/1/B", null);
rename("/1", "/3", null);
deprecatedRename("/2", "/4", null);
}
@Test
public void testMaxDirItemsLimits() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 0);
try {
mkdirs("1", null);
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set dfs", e);
}
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 64*100*1024);
try {
mkdirs("1", null);
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Cannot set dfs", e);
}
}
@Test
public void testMaxComponentsAndMaxDirItems() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 3);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
mkdirs("/1", null);
mkdirs("/22", null);
mkdirs("/333", MaxDirectoryItemsExceededException.class);
mkdirs("/4444", PathComponentTooLongException.class);
}
@Test
public void testDuringEditLogs() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 3);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_DIRECTORY_ITEMS_KEY, 2);
fsIsReady = false;
mkdirs("/1", null);
mkdirs("/22", null);
mkdirs("/333", null);
mkdirs("/4444", null);
mkdirs("/1/" + HdfsConstants.DOT_SNAPSHOT_DIR,
HadoopIllegalArgumentException.class);
}
/**
* This test verifies that the error string contains the
* right parent directory name if the operation fails with
* PathComponentTooLongException.
*/
@Test
public void testParentDirectoryNameIsCorrect() throws Exception {
conf.setInt(DFSConfigKeys.DFS_NAMENODE_MAX_COMPONENT_LENGTH_KEY, 20);
mkdirs("/user", null);
mkdirs("/user/testHome", null);
mkdirs("/user/testHome/FileNameLength", null);
mkdirCheckParentDirectory(
"/user/testHome/FileNameLength/really_big_name_0003_fail",
"/user/testHome/FileNameLength", PathComponentTooLongException.class);
renameCheckParentDirectory("/user/testHome/FileNameLength",
"/user/testHome/really_big_name_0003_fail", "/user/testHome/",
PathComponentTooLongException.class);
}
/**
* Verifies that the parent directory is correct after a failed call to
* mkdir
* @param name directory name
* @param parentDirName expected parent directory
* @param expected exception that is expected
* @throws Exception
*/
private void mkdirCheckParentDirectory(String name, String parentDirName,
Class<?> expected)
throws Exception {
verify(mkdirs(name, expected), parentDirName);
}
/**
* Verifies that the parent directory is correct after a failed call to
* rename
* @param name source name
* @param dst destination name
* @param parentDirName expected parent directory
* @param expected exception that is expected
* @throws Exception
*/
private void renameCheckParentDirectory(String name, String dst,
String parentDirName,
Class<?> expected)
throws Exception {
verify(rename(name, dst, expected), parentDirName);
}
/**
* Verifies that the parent directory name is present in the given message.
* @param message - exception message
* @param parentDirName - parent directory name to look for
*/
private void verify(String message, String parentDirName) {
boolean found = false;
if (message != null) {
String[] tokens = message.split("\\s+");
for (String token : tokens) {
if (token != null && token.equals(parentDirName)) {
found = true;
break;
}
}
}
assertTrue(found);
}
private String mkdirs(String name, Class<?> expected)
throws Exception {
lazyInitFSDirectory();
Class<?> generated = null;
String errorString = null;
try {
fs.mkdirs(name, perms, false);
} catch (Throwable e) {
generated = e.getClass();
e.printStackTrace();
errorString = e.getMessage();
}
assertEquals(expected, generated);
return errorString;
}
private String rename(String src, String dst, Class<?> expected)
throws Exception {
lazyInitFSDirectory();
Class<?> generated = null;
String errorString = null;
try {
fs.renameTo(src, dst, false, new Rename[] { });
} catch (Throwable e) {
generated = e.getClass();
errorString = e.getMessage();
}
assertEquals(expected, generated);
return errorString;
}
@SuppressWarnings("deprecation")
private void deprecatedRename(String src, String dst, Class<?> expected)
throws Exception {
lazyInitFSDirectory();
Class<?> generated = null;
try {
fs.renameTo(src, dst, false);
} catch (Throwable e) {
generated = e.getClass();
}
assertEquals(expected, generated);
}
private static void lazyInitFSDirectory() throws IOException {
// have to create after the caller has had a chance to set conf values
if (fs == null) {
fs = getMockNamesystem();
}
}
}
| 9,777
| 31.593333
| 91
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_FAIL;
import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.TXNS_PER_ROLL;
import static org.apache.hadoop.hdfs.server.namenode.TestEditLog.setupEdits;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FilenameFilter;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.net.URI;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.NativeCodeLoader;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.TreeMultiset;
public class TestFileJournalManager {
static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
private Configuration conf;
static {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() {
conf = new Configuration();
}
/**
* Find out how many transactions we can read from a
* FileJournalManager, starting at a given transaction ID.
*
* @param jm The journal manager
* @param fromTxId Transaction ID to start at
* @param inProgressOk Should we consider edit logs that are not finalized?
* @return The number of transactions
* @throws IOException
*/
static long getNumberOfTransactions(FileJournalManager jm, long fromTxId,
boolean inProgressOk, boolean abortOnGap) throws IOException {
long numTransactions = 0, txId = fromTxId;
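// the priority queue orders streams so those starting at the lowest
// txid come out first, letting us replay segments in ascending order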
final PriorityQueue<EditLogInputStream> allStreams =
new PriorityQueue<EditLogInputStream>(64,
JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
jm.selectInputStreams(allStreams, fromTxId, inProgressOk);
EditLogInputStream elis = null;
try {
while ((elis = allStreams.poll()) != null) {
try {
elis.skipUntil(txId);
while (true) {
FSEditLogOp op = elis.readOp();
if (op == null) {
break;
}
if (abortOnGap && (op.getTransactionId() != txId)) {
LOG.info("getNumberOfTransactions: detected gap at txId "
+ fromTxId);
return numTransactions;
}
txId = op.getTransactionId() + 1;
numTransactions++;
}
} finally {
IOUtils.cleanup(LOG, elis);
}
}
} finally {
IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
}
return numTransactions;
}
/**
* Test the normal operation of loading transactions from
* file journal manager. 3 edits directories are set up without any
* failures. Test that we read in the expected number of transactions.
*/
@Test
public void testNormalOperation() throws IOException {
File f1 = new File(TestEditLog.TEST_DIR + "/normtest0");
File f2 = new File(TestEditLog.TEST_DIR + "/normtest1");
File f3 = new File(TestEditLog.TEST_DIR + "/normtest2");
List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
NNStorage storage = setupEdits(editUris, 5);
long numJournals = 0;
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
numJournals++;
}
assertEquals(3, numJournals);
}
/**
* Test that in-progress files are handled correctly. Set up a single
* edits directory and fail after the last roll. Then verify that the
* logs have the expected number of transactions.
*/
@Test
public void testInprogressRecovery() throws IOException {
File f = new File(TestEditLog.TEST_DIR + "/inprogressrecovery");
// abort after the 5th roll
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
5, new AbortSpec(5, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
getNumberOfTransactions(jm, 1, true, false));
}
/**
* Test a mixture of in-progress and finalized files. Set up 3 edits
* directories and fail the second on the last roll. Verify that reading
* the transactions reads from the finalized directories.
*/
@Test
public void testInprogressRecoveryMixed() throws IOException {
File f1 = new File(TestEditLog.TEST_DIR + "/mixtest0");
File f2 = new File(TestEditLog.TEST_DIR + "/mixtest1");
File f3 = new File(TestEditLog.TEST_DIR + "/mixtest2");
List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
// abort after the 5th roll
NNStorage storage = setupEdits(editUris,
5, new AbortSpec(5, 1));
Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd = dirs.next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
sd = dirs.next();
jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
sd = dirs.next();
jm = new FileJournalManager(conf, sd, storage);
assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
}
/**
* Test that FileJournalManager behaves correctly despite in-progress
* files in all its edit log directories. Set up 3 directories and fail
* all on the last roll. Verify that the correct number of transactions
* is then loaded.
*/
@Test
public void testInprogressRecoveryAll() throws IOException {
File f1 = new File(TestEditLog.TEST_DIR + "/failalltest0");
File f2 = new File(TestEditLog.TEST_DIR + "/failalltest1");
File f3 = new File(TestEditLog.TEST_DIR + "/failalltest2");
List<URI> editUris = ImmutableList.of(f1.toURI(), f2.toURI(), f3.toURI());
// abort after the 5th roll
NNStorage storage = setupEdits(editUris, 5,
new AbortSpec(5, 0),
new AbortSpec(5, 1),
new AbortSpec(5, 2));
Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd = dirs.next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
sd = dirs.next();
jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
sd = dirs.next();
jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
}
/**
* Corrupt an edit log file after the start segment transaction
*/
private void corruptAfterStartSegment(File f) throws IOException {
RandomAccessFile raf = new RandomAccessFile(f, "rw");
raf.seek(0x20); // skip the version, the first transaction, and a bit of the next transaction
for (int i = 0; i < 1000; i++) {
raf.writeInt(0xdeadbeef);
}
raf.close();
}
@Test(expected=IllegalStateException.class)
public void testFinalizeErrorReportedToNNStorage() throws IOException, InterruptedException {
File f = new File(TestEditLog.TEST_DIR + "/filejournaltestError");
// abort after 10th roll
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
10, new AbortSpec(10, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
String sdRootPath = sd.getRoot().getAbsolutePath();
FileUtil.chmod(sdRootPath, "-w", true);
try {
jm.finalizeLogSegment(0, 1);
} finally {
FileUtil.chmod(sdRootPath, "+w", true);
assertTrue(storage.getRemovedStorageDirs().contains(sd));
}
}
/**
* Test that we can read from a stream created by FileJournalManager.
* Create a single edits directory, failing it on the final roll.
* Then try loading from the point of the 3rd roll. Verify that we read
* the correct number of transactions from this point.
*/
@Test
public void testReadFromStream() throws IOException {
File f = new File(TestEditLog.TEST_DIR + "/readfromstream");
// abort after 10th roll
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
10, new AbortSpec(10, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL;
assertEquals(expectedTotalTxnCount, getNumberOfTransactions(jm, 1,
true, false));
long skippedTxns = (3*TXNS_PER_ROLL); // skip first 3 files
long startingTxId = skippedTxns + 1;
long numLoadable = getNumberOfTransactions(jm, startingTxId,
true, false);
assertEquals(expectedTotalTxnCount - skippedTxns, numLoadable);
}
/**
* Make requests with starting transaction ids which don't match the beginning
* txid of some log segments.
*
* This should succeed.
*/
@Test
public void testAskForTransactionsMidfile() throws IOException {
File f = new File(TestEditLog.TEST_DIR + "/askfortransactionsmidfile");
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
10);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
// 10 rolls, so 11 rolled files, 110 txids total.
final int TOTAL_TXIDS = 10 * 11;
for (int txid = 1; txid <= TOTAL_TXIDS; txid++) {
assertEquals((TOTAL_TXIDS - txid) + 1, getNumberOfTransactions(jm, txid,
true, false));
}
}
/**
* Test that we receive the correct number of transactions when we count
* the number of transactions around gaps.
* Set up a single edits directory, with no failures. Delete the 4th logfile.
* Test that getNumberOfTransactions returns the correct number of
* transactions before this gap and after this gap. Also verify that
* counting from within the gap finds no transactions.
*/
@Test
public void testManyLogsWithGaps() throws IOException {
File f = new File(TestEditLog.TEST_DIR + "/manylogswithgaps");
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
final long startGapTxId = 3*TXNS_PER_ROLL + 1;
final long endGapTxId = 4*TXNS_PER_ROLL;
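// the 4th finalized segment covers txids [3*TXNS_PER_ROLL + 1,
// 4*TXNS_PER_ROLL]; deleting it below is what creates the gap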
File[] files = new File(f, "current").listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
if (name.startsWith(NNStorage.getFinalizedEditsFileName(startGapTxId, endGapTxId))) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
assertTrue(files[0].delete());
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(startGapTxId-1, getNumberOfTransactions(jm, 1, true, true));
assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
// rolled 10 times so there should be 11 files.
assertEquals(11*TXNS_PER_ROLL - endGapTxId,
getNumberOfTransactions(jm, endGapTxId + 1, true, true));
}
/**
* Test that we can load an edits directory with a corrupt inprogress file.
* The corrupt inprogress file should be moved to the side.
*/
@Test
public void testManyLogsWithCorruptInprogress() throws IOException {
File f = new File(TestEditLog.TEST_DIR + "/manylogswithcorruptinprogress");
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()), 10, new AbortSpec(10, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
File[] files = new File(f, "current").listFiles(new FilenameFilter() {
@Override
public boolean accept(File dir, String name) {
if (name.startsWith("edits_inprogress")) {
return true;
}
return false;
}
});
assertEquals(1, files.length);
corruptAfterStartSegment(files[0]);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(10*TXNS_PER_ROLL+1,
getNumberOfTransactions(jm, 1, true, false));
}
@Test
public void testGetRemoteEditLog() throws IOException {
StorageDirectory sd = FSImageTestUtil.mockStorageDirectory(
NameNodeDirType.EDITS, false,
NNStorage.getFinalizedEditsFileName(1, 100),
NNStorage.getFinalizedEditsFileName(101, 200),
NNStorage.getInProgressEditsFileName(201),
NNStorage.getFinalizedEditsFileName(1001, 1100));
// passing null for NNStorage because this unit test will not use it
FileJournalManager fjm = new FileJournalManager(conf, sd, null);
assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 150));
assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
assertEquals("Asking for a newer log than exists should return empty list",
"", getLogsAsString(fjm, 9999));
}
/**
* tests that passing an invalid dir to matchEditLogs throws IOException
*/
@Test(expected = IOException.class)
public void testMatchEditLogInvalidDirThrowsIOException() throws IOException {
File badDir = new File("does not exist");
FileJournalManager.matchEditLogs(badDir);
}
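/**
* Returns an open stream positioned at txId, taken from the stream whose
* segment contains that transaction, or null if no such stream exists.
*/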
private static EditLogInputStream getJournalInputStream(JournalManager jm,
long txId, boolean inProgressOk) throws IOException {
final PriorityQueue<EditLogInputStream> allStreams =
new PriorityQueue<EditLogInputStream>(64,
JournalSet.EDIT_LOG_INPUT_STREAM_COMPARATOR);
jm.selectInputStreams(allStreams, txId, inProgressOk);
EditLogInputStream elis = null, ret;
try {
while ((elis = allStreams.poll()) != null) {
if (elis.getFirstTxId() > txId) {
break;
}
if (elis.getLastTxId() < txId) {
elis.close();
continue;
}
elis.skipUntil(txId);
ret = elis;
elis = null;
return ret;
}
} finally {
IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0]));
IOUtils.cleanup(LOG, elis);
}
return null;
}
/**
* Make sure that we start reading the correct op when we request a stream
* with a txid in the middle of an edit log file.
*/
@Test
public void testReadFromMiddleOfEditLog() throws CorruptionException,
IOException {
File f = new File(TestEditLog.TEST_DIR + "/readfrommiddleofeditlog");
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
10);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
EditLogInputStream elis = getJournalInputStream(jm, 5, true);
try {
FSEditLogOp op = elis.readOp();
assertEquals("read unexpected op", op.getTransactionId(), 5);
} finally {
IOUtils.cleanup(LOG, elis);
}
}
/**
* Make sure that in-progress streams aren't counted if we don't ask for
* them.
*/
@Test
public void testExcludeInProgressStreams() throws CorruptionException,
IOException {
File f = new File(TestEditLog.TEST_DIR + "/excludeinprogressstreams");
// Don't close the edit log once the files have been set up.
NNStorage storage = setupEdits(Collections.<URI>singletonList(f.toURI()),
10, false);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
// If we exclude the in-progress stream, we should only have 100 txns.
assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
EditLogInputStream elis = getJournalInputStream(jm, 90, false);
try {
FSEditLogOp lastReadOp = null;
while ((lastReadOp = elis.readOp()) != null) {
assertTrue(lastReadOp.getTransactionId() <= 100);
}
} finally {
IOUtils.cleanup(LOG, elis);
}
}
/**
* Tests that internal renames are done using native code on platforms that
* have it. The native rename includes more detailed information about the
* failure, which can be useful for troubleshooting.
*/
@Test
public void testDoPreUpgradeIOError() throws IOException {
File storageDir = new File(TestEditLog.TEST_DIR, "preupgradeioerror");
List<URI> editUris = Collections.singletonList(storageDir.toURI());
NNStorage storage = setupEdits(editUris, 5);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
assertNotNull(sd);
// Change storage directory so that renaming current to previous.tmp fails.
FileUtil.setWritable(storageDir, false);
FileJournalManager jm = null;
try {
jm = new FileJournalManager(conf, sd, storage);
exception.expect(IOException.class);
if (NativeCodeLoader.isNativeCodeLoaded()) {
exception.expectMessage("failure in native rename");
}
jm.doPreUpgrade();
} finally {
IOUtils.cleanup(LOG, jm);
// Restore permissions on storage directory and make sure we can delete.
FileUtil.setWritable(storageDir, true);
FileUtil.fullyDelete(storageDir);
}
}
private static String getLogsAsString(
FileJournalManager fjm, long firstTxId) throws IOException {
return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, false));
}
}
| 20,190
| 37.978764
| 104
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithAcl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Test;
import com.google.common.collect.Lists;
public class TestFSImageWithAcl {
private static Configuration conf;
private static MiniDFSCluster cluster;
@BeforeClass
public static void setUp() throws IOException {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
@AfterClass
public static void tearDown() {
cluster.shutdown();
}
private void testAcl(boolean persistNamespace) throws IOException {
Path p = new Path("/p");
DistributedFileSystem fs = cluster.getFileSystem();
fs.create(p).close();
fs.mkdirs(new Path("/23"));
AclEntry e = new AclEntry.Builder().setName("foo")
.setPermission(READ_EXECUTE).setScope(ACCESS).setType(USER).build();
fs.modifyAclEntries(p, Lists.newArrayList(e));
restart(fs, persistNamespace);
AclStatus s = cluster.getNamesystem().getAclStatus(p.toString());
AclEntry[] returned = Lists.newArrayList(s.getEntries()).toArray(
new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ) }, returned);
fs.removeAcl(p);
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
s = cluster.getNamesystem().getAclStatus(p.toString());
returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] { }, returned);
fs.modifyAclEntries(p, Lists.newArrayList(e));
s = cluster.getNamesystem().getAclStatus(p.toString());
returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
Assert.assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ) }, returned);
}
@Test
public void testPersistAcl() throws IOException {
testAcl(true);
}
@Test
public void testAclEditLog() throws IOException {
testAcl(false);
}
private void doTestDefaultAclNewChildren(boolean persistNamespace)
throws IOException {
Path dirPath = new Path("/dir");
Path filePath = new Path(dirPath, "file1");
Path subdirPath = new Path(dirPath, "subdir1");
DistributedFileSystem fs = cluster.getFileSystem();
fs.mkdirs(dirPath);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(dirPath, aclSpec);
fs.create(filePath).close();
fs.mkdirs(subdirPath);
AclEntry[] fileExpected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
AclEntry[] subdirExpected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, READ_EXECUTE) };
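// a default ACL on the parent is copied into new children: files receive
// only the access entries, while subdirectories also inherit the default
// entries themselves (as the two expected arrays above encode)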
AclEntry[] fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
AclEntry[] subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)010755);
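// 010755 (octal): the leading 01 bit is the marker used by
// AclTestHelpers.assertPermission for "has an extended ACL"; the low
// 0755 bits are the ordinary permission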
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)010755);
aclSpec = Lists.newArrayList(aclEntry(DEFAULT, USER, "foo", READ_WRITE));
fs.modifyAclEntries(dirPath, aclSpec);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)010755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)010755);
fs.removeAcl(dirPath);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)010755);
restart(fs, persistNamespace);
fileReturned = fs.getAclStatus(filePath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(fileExpected, fileReturned);
subdirReturned = fs.getAclStatus(subdirPath).getEntries()
.toArray(new AclEntry[0]);
Assert.assertArrayEquals(subdirExpected, subdirReturned);
assertPermission(fs, subdirPath, (short)010755);
}
@Test
public void testFsImageDefaultAclNewChildren() throws IOException {
doTestDefaultAclNewChildren(true);
}
@Test
public void testEditLogDefaultAclNewChildren() throws IOException {
doTestDefaultAclNewChildren(false);
}
/**
* Restart the NameNode, optionally saving a new checkpoint.
*
* @param fs DistributedFileSystem used for saving namespace
* @param persistNamespace boolean true to save a new checkpoint
* @throws IOException if restart fails
*/
private void restart(DistributedFileSystem fs, boolean persistNamespace)
throws IOException {
if (persistNamespace) {
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
}
cluster.restartNameNode();
cluster.waitActive();
}
}
| 8,182
| 34.890351
| 77
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextXAttr.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.URI;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.BeforeClass;
/**
* Tests of XAttr operations using FileContext APIs.
*/
public class TestFileContextXAttr extends FSXAttrBaseTest {
@Override
protected FileSystem createFileSystem() throws Exception {
FileContextFS fcFs = new FileContextFS();
fcFs.initialize(FileSystem.getDefaultUri(conf), conf);
return fcFs;
}
/**
* This reuses FSXAttrBaseTest's test cases via a file system
* implementation that overrides only the xattr-related methods to go
* through FileContext. Other operations use the normal file system.
*/
public static class FileContextFS extends DistributedFileSystem {
private FileContext fc;
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
fc = FileContext.getFileContext(conf);
}
@Override
public void setXAttr(Path path, final String name, final byte[] value)
throws IOException {
fc.setXAttr(path, name, value);
}
@Override
public void setXAttr(Path path, final String name, final byte[] value,
final EnumSet<XAttrSetFlag> flag) throws IOException {
fc.setXAttr(path, name, value, flag);
}
@Override
public byte[] getXAttr(Path path, final String name) throws IOException {
return fc.getXAttr(path, name);
}
@Override
public Map<String, byte[]> getXAttrs(Path path) throws IOException {
return fc.getXAttrs(path);
}
@Override
public Map<String, byte[]> getXAttrs(Path path, final List<String> names)
throws IOException {
return fc.getXAttrs(path, names);
}
@Override
public void removeXAttr(Path path, final String name) throws IOException {
fc.removeXAttr(path, name);
}
}
}
| 3,094
| 30.907216
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
import org.apache.hadoop.hdfs.server.protocol.StorageReport;
import org.apache.hadoop.net.Node;
import org.junit.After;
import org.junit.Test;
/**
 * Test to ensure requests from dead datanodes are rejected by the namenode
 * with appropriate exceptions/failure responses
*/
public class TestDeadDatanode {
private static final Log LOG = LogFactory.getLog(TestDeadDatanode.class);
private MiniDFSCluster cluster;
@After
public void cleanup() {
cluster.shutdown();
}
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
* - Start a cluster
* - Shutdown the datanode and wait for it to be marked dead at the namenode
* - Send datanode requests to Namenode and make sure it is rejected
* appropriately.
*/
@Test
public void testDeadDatanode() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
String poolId = cluster.getNamesystem().getBlockPoolId();
// wait for datanode to be marked live
DataNode dn = cluster.getDataNodes().get(0);
DatanodeRegistration reg =
DataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
// Shutdown and wait for datanode to be marked dead
dn.shutdown();
DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
DatanodeProtocol dnp = cluster.getNameNodeRpc();
ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
new Block(0),
ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK,
null) };
StorageReceivedDeletedBlocks[] storageBlocks = {
new StorageReceivedDeletedBlocks(reg.getDatanodeUuid(), blocks) };
// Ensure blockReceived call from dead datanode is rejected with IOException
try {
dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
fail("Expected IOException is not thrown");
} catch (IOException ex) {
// Expected
}
// Ensure blockReport from dead datanode is rejected with IOException
StorageBlockReport[] report = { new StorageBlockReport(
new DatanodeStorage(reg.getDatanodeUuid()),
BlockListAsLongs.EMPTY) };
try {
dnp.blockReport(reg, poolId, report,
new BlockReportContext(1, 0, System.nanoTime(), 0L));
fail("Expected IOException is not thrown");
} catch (IOException ex) {
// Expected
}
// Ensure heartbeat from dead datanode is rejected with a command
// that asks datanode to register again
StorageReport[] rep = { new StorageReport(
new DatanodeStorage(reg.getDatanodeUuid()),
false, 0, 0, 0, 0) };
DatanodeCommand[] cmd =
dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true).getCommands();
assertEquals(1, cmd.length);
assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
.getAction());
}
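  /**
   * Test that a dead datanode is not selected as a target when the block
   * manager chooses datanodes for a new block.
   */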
@Test
public void testDeadNodeAsBlockTarget() throws Exception {
Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
cluster.waitActive();
String poolId = cluster.getNamesystem().getBlockPoolId();
// wait for datanode to be marked live
DataNode dn = cluster.getDataNodes().get(0);
DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
.getDataNodes().get(0), poolId);
// Get the updated datanode descriptor
BlockManager bm = cluster.getNamesystem().getBlockManager();
DatanodeManager dm = bm.getDatanodeManager();
Node clientNode = dm.getDatanode(reg);
DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
20000);
// Shutdown and wait for datanode to be marked dead
dn.shutdown();
DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false,
20000);
    // Get the updated datanode descriptor available in the DatanodeManager.
    // Choose targets for a new block; the dead local node should not be
    // selected since it is no longer part of the cluster.
DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7);
for (DatanodeStorageInfo datanodeStorageInfo : results) {
assertFalse("Dead node should not be choosen", datanodeStorageInfo
.getDatanodeDescriptor().equals(clientNode));
}
}
}
| 7,172
| 40.703488
| 86
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEditsFileName;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.cli.CLITestCmdDFS;
import org.apache.hadoop.cli.util.CLICommandDFSAdmin;
import org.apache.hadoop.cli.util.CommandExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream;
import org.apache.hadoop.util.Shell;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import com.google.common.collect.ImmutableSet;
/**
* Startup and checkpoint tests
*
*/
public class TestStorageRestore {
public static final String NAME_NODE_HOST = "localhost:";
public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
private static final Log LOG =
LogFactory.getLog(TestStorageRestore.class.getName());
private Configuration config;
private File hdfsDir=null;
static final long seed = 0xAAAAEEFL;
static final int blockSize = 4096;
static final int fileSize = 8192;
private File path1, path2, path3;
private MiniDFSCluster cluster;
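  /**
   * Create three fresh storage directories: name1 and name2 hold both image
   * and edits, name3 holds edits only. The restore-failed-storage feature is
   * switched on.
   */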
@Before
public void setUpNameDirs() throws Exception {
config = new HdfsConfiguration();
hdfsDir = new File(MiniDFSCluster.getBaseDirectory()).getCanonicalFile();
if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'");
}
hdfsDir.mkdirs();
path1 = new File(hdfsDir, "name1");
path2 = new File(hdfsDir, "name2");
path3 = new File(hdfsDir, "name3");
path1.mkdir(); path2.mkdir(); path3.mkdir();
if(!path2.exists() || !path3.exists() || !path1.exists()) {
throw new IOException("Couldn't create dfs.name dirs in " + hdfsDir.getAbsolutePath());
}
    String dfs_name_dir = path1.getPath() + "," + path2.getPath();
    System.out.println("configuring hdfsDir = " + hdfsDir.getAbsolutePath() +
        "; dfs_name_dir = " + dfs_name_dir +
        "; dfs_name_edits_dir (edits only) = " + path3.getPath());
config.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dfs_name_dir);
config.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dfs_name_dir + "," + path3.getPath());
config.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY,new File(hdfsDir, "secondary").getPath());
FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
config.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
// set the restore feature on
config.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
}
  /**
   * Invalidate the given storage directories by reporting errors on them,
   * and inject write faults into the edit streams backed by name2 and name3.
   */
public void invalidateStorage(FSImage fi, Set<File> filesToInvalidate) throws IOException {
ArrayList<StorageDirectory> al = new ArrayList<StorageDirectory>(2);
Iterator<StorageDirectory> it = fi.getStorage().dirIterator();
while(it.hasNext()) {
StorageDirectory sd = it.next();
if(filesToInvalidate.contains(sd.getRoot())) {
LOG.info("causing IO error on " + sd.getRoot());
al.add(sd);
}
}
// simulate an error
fi.getStorage().reportErrorsOnDirectories(al);
for (JournalAndStream j : fi.getEditLog().getJournals()) {
if (j.getManager() instanceof FileJournalManager) {
FileJournalManager fm = (FileJournalManager)j.getManager();
if (fm.getStorageDirectory().getRoot().equals(path2)
|| fm.getStorageDirectory().getRoot().equals(path3)) {
EditLogOutputStream mockStream = spy(j.getCurrentStream());
j.setCurrentStreamForTests(mockStream);
doThrow(new IOException("Injected fault: write")).
when(mockStream).write(Mockito.<FSEditLogOp>anyObject());
}
}
}
}
  /**
   * Log the contents of all the storage directories of the given image.
   */
private void printStorages(FSImage image) {
FSImageTestUtil.logStorageContents(LOG, image.getStorage());
}
/**
   * Test that failed storage directories are restored by a checkpoint:
* 1. create DFS cluster with 3 storage directories - 2 EDITS_IMAGE, 1 EDITS
* 2. create a cluster and write a file
* 3. corrupt/disable one storage (or two) by removing
* 4. run doCheckpoint - it will fail on removed dirs (which
* will invalidate the storages)
* 5. write another file
* 6. check that edits and fsimage differ
* 7. run doCheckpoint
* 8. verify that all the image and edits files are the same.
*/
@Test
public void testStorageRestore() throws Exception {
int numDatanodes = 0;
cluster = new MiniDFSCluster.Builder(config).numDataNodes(numDatanodes)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
SecondaryNameNode secondary = new SecondaryNameNode(config);
System.out.println("****testStorageRestore: Cluster and SNN started");
printStorages(cluster.getNameNode().getFSImage());
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/", "test");
assertTrue(fs.mkdirs(path));
System.out.println("****testStorageRestore: dir 'test' created, invalidating storage...");
invalidateStorage(cluster.getNameNode().getFSImage(), ImmutableSet.of(path2, path3));
printStorages(cluster.getNameNode().getFSImage());
System.out.println("****testStorageRestore: storage invalidated");
path = new Path("/", "test1");
assertTrue(fs.mkdirs(path));
System.out.println("****testStorageRestore: dir 'test1' created");
// We did another edit, so the still-active directory at 'path1'
// should now differ from the others
FSImageTestUtil.assertFileContentsDifferent(2,
new File(path1, "current/" + getInProgressEditsFileName(1)),
new File(path2, "current/" + getInProgressEditsFileName(1)),
new File(path3, "current/" + getInProgressEditsFileName(1)));
FSImageTestUtil.assertFileContentsSame(
new File(path2, "current/" + getInProgressEditsFileName(1)),
new File(path3, "current/" + getInProgressEditsFileName(1)));
System.out.println("****testStorageRestore: checkfiles(false) run");
    secondary.doCheckpoint(); // should re-enable the failed storage dirs
// We should have a checkpoint through txid 4 in the two image dirs
// (txid=4 for BEGIN, mkdir, mkdir, END)
FSImageTestUtil.assertFileContentsSame(
new File(path1, "current/" + getImageFileName(4)),
new File(path2, "current/" + getImageFileName(4)));
assertFalse("Should not have any image in an edits-only directory",
new File(path3, "current/" + getImageFileName(4)).exists());
// Should have finalized logs in the directory that didn't fail
assertTrue("Should have finalized logs in the directory that didn't fail",
new File(path1, "current/" + getFinalizedEditsFileName(1,4)).exists());
// Should not have finalized logs in the failed directories
assertFalse("Should not have finalized logs in the failed directories",
new File(path2, "current/" + getFinalizedEditsFileName(1,4)).exists());
assertFalse("Should not have finalized logs in the failed directories",
new File(path3, "current/" + getFinalizedEditsFileName(1,4)).exists());
// The new log segment should be in all of the directories.
FSImageTestUtil.assertFileContentsSame(
new File(path1, "current/" + getInProgressEditsFileName(5)),
new File(path2, "current/" + getInProgressEditsFileName(5)),
new File(path3, "current/" + getInProgressEditsFileName(5)));
String md5BeforeEdit = FSImageTestUtil.getFileMD5(
new File(path1, "current/" + getInProgressEditsFileName(5)));
    // The original image should still be in the previously failed image
    // directory after it got restored, since it's still useful for
    // a recovery!
FSImageTestUtil.assertFileContentsSame(
new File(path1, "current/" + getImageFileName(0)),
new File(path2, "current/" + getImageFileName(0)));
// Do another edit to verify that all the logs are active.
path = new Path("/", "test2");
assertTrue(fs.mkdirs(path));
// Logs should be changed by the edit.
String md5AfterEdit = FSImageTestUtil.getFileMD5(
new File(path1, "current/" + getInProgressEditsFileName(5)));
assertFalse(md5BeforeEdit.equals(md5AfterEdit));
// And all logs should be changed.
FSImageTestUtil.assertFileContentsSame(
new File(path1, "current/" + getInProgressEditsFileName(5)),
new File(path2, "current/" + getInProgressEditsFileName(5)),
new File(path3, "current/" + getInProgressEditsFileName(5)));
secondary.shutdown();
cluster.shutdown();
// All logs should be finalized by clean shutdown
FSImageTestUtil.assertFileContentsSame(
new File(path1, "current/" + getFinalizedEditsFileName(5,7)),
new File(path2, "current/" + getFinalizedEditsFileName(5,7)),
new File(path3, "current/" + getFinalizedEditsFileName(5,7)));
}
/**
* Test dfsadmin -restoreFailedStorage command
* @throws Exception
*/
@Test
public void testDfsAdminCmd() throws Exception {
cluster = new MiniDFSCluster.Builder(config).
numDataNodes(2).
manageNameDfsDirs(false).build();
cluster.waitActive();
try {
FSImage fsi = cluster.getNameNode().getFSImage();
      // the cluster is started with dfs.namenode.name.dir.restore set to true
      // (in setUpNameDirs())
      boolean restore = fsi.getStorage().getRestoreFailedStorage();
      LOG.info("Restore is " + restore);
      assertTrue(restore);
      // now run the DFSAdmin command
String cmd = "-fs NAMENODE -restoreFailedStorage false";
String namenode = config.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
CommandExecutor executor =
new CLITestCmdDFS(cmd, new CLICommandDFSAdmin()).getExecutor(namenode);
executor.executeCommand(cmd);
restore = fsi.getStorage().getRestoreFailedStorage();
assertFalse("After set true call restore is " + restore, restore);
// run one more time - to set it to true again
cmd = "-fs NAMENODE -restoreFailedStorage true";
executor.executeCommand(cmd);
restore = fsi.getStorage().getRestoreFailedStorage();
assertTrue("After set false call restore is " + restore, restore);
// run one more time - no change in value
cmd = "-fs NAMENODE -restoreFailedStorage check";
CommandExecutor.Result cmdResult = executor.executeCommand(cmd);
restore = fsi.getStorage().getRestoreFailedStorage();
assertTrue("After check call restore is " + restore, restore);
      String commandOutput = cmdResult.getCommandOutput().trim();
      assertTrue(commandOutput.contains("restoreFailedStorage is set to true"));
} finally {
cluster.shutdown();
}
}
/**
* Test to simulate interleaved checkpointing by 2 2NNs after a storage
* directory has been taken offline. The first will cause the directory to
* come back online, but it won't have any valid contents. The second 2NN will
* then try to perform a checkpoint. The NN should not serve up the image or
* edits from the restored (empty) dir.
*/
@Test
public void testMultipleSecondaryCheckpoint() throws IOException {
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
.manageNameDfsDirs(false).build();
cluster.waitActive();
secondary = new SecondaryNameNode(config);
FSImage fsImage = cluster.getNameNode().getFSImage();
printStorages(fsImage);
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/", "test");
assertTrue(fs.mkdirs(testPath));
printStorages(fsImage);
// Take name1 offline
invalidateStorage(fsImage, ImmutableSet.of(path1));
// Simulate a 2NN beginning a checkpoint, but not finishing. This will
// cause name1 to be restored.
cluster.getNameNodeRpc().rollEditLog();
printStorages(fsImage);
// Now another 2NN comes along to do a full checkpoint.
secondary.doCheckpoint();
printStorages(fsImage);
// The created file should still exist in the in-memory FS state after the
// checkpoint.
assertTrue("path exists before restart", fs.exists(testPath));
secondary.shutdown();
// Restart the NN so it reloads the edits from on-disk.
cluster.restartNameNode();
// The created file should still exist after the restart.
assertTrue("path should still exist after restart", fs.exists(testPath));
} finally {
if (cluster != null) {
cluster.shutdown();
}
if (secondary != null) {
secondary.shutdown();
}
}
}
/**
* 1. create DFS cluster with 3 storage directories
* - 2 EDITS_IMAGE(name1, name2), 1 EDITS(name3)
* 2. create a file
* 3. corrupt/disable name2 and name3 by removing rwx permission
* 4. run doCheckpoint
* - will fail on removed dirs (which invalidates them)
* 5. write another file
* 6. check there is only one healthy storage dir
* 7. run doCheckpoint - recover should fail but checkpoint should succeed
* 8. check there is still only one healthy storage dir
   * 9. restore the access permissions for name2 and name3, run checkpoint again
* 10.verify there are 3 healthy storage dirs.
*/
@Test
public void testStorageRestoreFailure() throws Exception {
SecondaryNameNode secondary = null;
// On windows, revoking write+execute permission on name2 does not
// prevent us from creating files in name2\current. Hence we revoke
// permissions on name2\current for the test.
String nameDir2 = Shell.WINDOWS ?
(new File(path2, "current").getAbsolutePath()) : path2.toString();
String nameDir3 = Shell.WINDOWS ?
(new File(path3, "current").getAbsolutePath()) : path3.toString();
try {
cluster = new MiniDFSCluster.Builder(config)
.numDataNodes(0)
.manageNameDfsDirs(false).build();
cluster.waitActive();
secondary = new SecondaryNameNode(config);
printStorages(cluster.getNameNode().getFSImage());
FileSystem fs = cluster.getFileSystem();
Path path = new Path("/", "test");
assertTrue(fs.mkdirs(path));
// invalidate storage by removing rwx permission from name2 and name3
assertTrue(FileUtil.chmod(nameDir2, "000") == 0);
assertTrue(FileUtil.chmod(nameDir3, "000") == 0);
secondary.doCheckpoint(); // should remove name2 and name3
printStorages(cluster.getNameNode().getFSImage());
path = new Path("/", "test1");
assertTrue(fs.mkdirs(path));
      assertEquals(1, cluster.getNameNode().getFSImage().getStorage()
          .getNumStorageDirs());
      secondary.doCheckpoint(); // shouldn't be able to restore name2 and name3
      assertEquals(1, cluster.getNameNode().getFSImage().getStorage()
          .getNumStorageDirs());
      assertTrue(FileUtil.chmod(nameDir2, "755") == 0);
      assertTrue(FileUtil.chmod(nameDir3, "755") == 0);
      secondary.doCheckpoint(); // should restore name2 and name3
      assertEquals(3, cluster.getNameNode().getFSImage().getStorage()
          .getNumStorageDirs());
} finally {
if (path2.exists()) {
FileUtil.chmod(nameDir2, "755");
}
if (path3.exists()) {
FileUtil.chmod(nameDir3, "755");
}
if (cluster != null) {
cluster.shutdown();
}
if (secondary != null) {
secondary.shutdown();
}
}
}
}
| 17,552
| 38.533784
| 103
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupOptionUpgrade.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.net.URI;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import org.junit.runners.Parameterized.Parameters;
/**
* This class tests various upgrade cases from earlier versions to current
* version with and without clusterid.
*/
@RunWith(value = Parameterized.class)
public class TestStartupOptionUpgrade {
private Configuration conf;
private StartupOption startOpt;
private int layoutVersion;
NNStorage storage;
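  /**
   * Run each test case twice: once with StartupOption.UPGRADE and once with
   * StartupOption.UPGRADEONLY.
   */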
@Parameters
public static Collection<Object[]> startOption() {
Object[][] params = new Object[][] { { StartupOption.UPGRADE },
{ StartupOption.UPGRADEONLY } };
return Arrays.asList(params);
}
public TestStartupOptionUpgrade(StartupOption startOption) {
super();
this.startOpt = startOption;
}
@Before
public void setUp() throws Exception {
conf = new HdfsConfiguration();
startOpt.setClusterId(null);
storage = new NNStorage(conf,
Collections.<URI>emptyList(),
Collections.<URI>emptyList());
}
@After
public void tearDown() throws Exception {
conf = null;
startOpt = null;
}
  /**
   * Tests the upgrade from version 0.20.204 to the Federation version,
   * without a clusterid. Case: -upgrade
   * Expected to generate a new clusterid.
   *
   * @throws Exception
   */
@Test
public void testStartupOptUpgradeFrom204() throws Exception {
layoutVersion = Feature.RESERVED_REL20_204.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
assertTrue("Clusterid should start with CID", storage.getClusterID()
.startsWith("CID"));
}
  /**
   * Tests the upgrade from version 0.22 to the Federation version, with a
   * clusterid. Case: -upgrade -clusterid <cid>
   * Expected to reuse the user-given clusterid.
   *
   * @throws Exception
   */
@Test
public void testStartupOptUpgradeFrom22WithCID() throws Exception {
startOpt.setClusterId("cid");
layoutVersion = Feature.RESERVED_REL22.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
assertEquals("Clusterid should match with the given clusterid",
"cid", storage.getClusterID());
}
  /**
   * Tests the upgrade from one Federation version to another, without a
   * clusterid. Case: -upgrade
   * Expected to reuse the existing clusterid.
   *
   * @throws Exception
   */
@Test
public void testStartupOptUpgradeFromFederation()
throws Exception {
// Test assumes clusterid already exists, set the clusterid
storage.setClusterID("currentcid");
layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
assertEquals("Clusterid should match with the existing one",
"currentcid", storage.getClusterID());
}
  /**
   * Tests the upgrade from one Federation version to another, with a wrong
   * clusterid. Case: -upgrade -clusterid <cid>
   * Expected to reuse the existing clusterid and ignore the user-given one.
   *
   * @throws Exception
   */
@Test
public void testStartupOptUpgradeFromFederationWithWrongCID()
throws Exception {
startOpt.setClusterId("wrong-cid");
storage.setClusterID("currentcid");
layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
assertEquals("Clusterid should match with the existing one",
"currentcid", storage.getClusterID());
}
  /**
   * Tests the upgrade from one Federation version to another, with the
   * correct clusterid. Case: -upgrade -clusterid <cid>
   * Expected to reuse the existing clusterid and ignore the user-given one.
   *
   * @throws Exception
   */
@Test
public void testStartupOptUpgradeFromFederationWithCID()
throws Exception {
startOpt.setClusterId("currentcid");
storage.setClusterID("currentcid");
layoutVersion = Feature.FEDERATION.getInfo().getLayoutVersion();
storage.processStartupOptionsForUpgrade(startOpt, layoutVersion);
assertEquals("Clusterid should match with the existing one",
"currentcid", storage.getClusterID());
}
}
| 5,595
| 33.331288
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileContextAcl.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.URI;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.junit.BeforeClass;
/**
* Tests for ACL operation through FileContext APIs
*/
public class TestFileContextAcl extends FSAclBaseTest {
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
startCluster();
}
@Override
protected FileSystem createFileSystem() throws Exception {
FileContextFS fcFs = new FileContextFS();
fcFs.initialize(FileSystem.getDefaultUri(conf), conf);
return fcFs;
}
  /*
   * To reuse FSAclBaseTest's test cases, this creates a filesystem
   * implementation backed by FileContext. Only the ACL-related methods are
   * overridden; all other operations go through the normal filesystem, which
   * is out of scope for this test.
   */
public static class FileContextFS extends DistributedFileSystem {
private FileContext fc;
@Override
public void initialize(URI uri, Configuration conf) throws IOException {
super.initialize(uri, conf);
fc = FileContext.getFileContext(conf);
}
@Override
public void modifyAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fc.modifyAclEntries(path, aclSpec);
}
@Override
public void removeAclEntries(Path path, List<AclEntry> aclSpec)
throws IOException {
fc.removeAclEntries(path, aclSpec);
}
@Override
public void removeDefaultAcl(Path path) throws IOException {
fc.removeDefaultAcl(path);
}
@Override
public void removeAcl(Path path) throws IOException {
fc.removeAcl(path);
}
@Override
public void setAcl(Path path, List<AclEntry> aclSpec) throws IOException {
fc.setAcl(path, aclSpec);
}
@Override
public AclStatus getAclStatus(Path path) throws IOException {
return fc.getAclStatus(path);
}
}
}
| 3,072
| 29.73
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import java.io.FileNotFoundException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
/** Test snapshot related operations. */
public class TestSnapshotPathINodes {
private static final long seed = 0;
private static final short REPLICATION = 3;
static private final Path dir = new Path("/TestSnapshot");
static private final Path sub1 = new Path(dir, "sub1");
static private final Path file1 = new Path(sub1, "file1");
static private final Path file2 = new Path(sub1, "file2");
static private MiniDFSCluster cluster;
static private FSDirectory fsdir;
static private DistributedFileSystem hdfs;
@BeforeClass
public static void setUp() throws Exception {
Configuration conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
}
@Before
public void reset() throws Exception {
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/** Test allow-snapshot operation. */
@Test (timeout=15000)
public void testAllowSnapshot() throws Exception {
final String pathStr = sub1.toString();
final INode before = fsdir.getINode(pathStr);
// Before a directory is snapshottable
Assert.assertFalse(before.asDirectory().isSnapshottable());
// After a directory is snapshottable
final Path path = new Path(pathStr);
hdfs.allowSnapshot(path);
{
final INode after = fsdir.getINode(pathStr);
Assert.assertTrue(after.asDirectory().isSnapshottable());
}
hdfs.disallowSnapshot(path);
{
final INode after = fsdir.getINode(pathStr);
Assert.assertFalse(after.asDirectory().isSnapshottable());
}
}
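  /**
   * Look up the snapshot with the given name on the snapshottable directory
   * that sits at position (index - 1) of the resolved path; returns null when
   * no snapshot name is given.
   */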
static Snapshot getSnapshot(INodesInPath inodesInPath, String name,
int index) {
if (name == null) {
return null;
}
final INode inode = inodesInPath.getINode(index - 1);
return inode.asDirectory().getSnapshot(DFSUtil.string2Bytes(name));
}
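  /**
   * Assert the snapshot-related state of a resolved path: the snapshot flag,
   * the path snapshot id (and, for non-snapshot paths, the latest snapshot
   * id), and that the inode at the given index is a snapshot root.
   */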
static void assertSnapshot(INodesInPath inodesInPath, boolean isSnapshot,
final Snapshot snapshot, int index) {
assertEquals(isSnapshot, inodesInPath.isSnapshot());
assertEquals(Snapshot.getSnapshotId(isSnapshot ? snapshot : null),
inodesInPath.getPathSnapshotId());
if (!isSnapshot) {
assertEquals(Snapshot.getSnapshotId(snapshot),
inodesInPath.getLatestSnapshotId());
}
if (isSnapshot && index >= 0) {
assertEquals(Snapshot.Root.class, inodesInPath.getINode(index).getClass());
}
}
static void assertINodeFile(INode inode, Path path) {
assertEquals(path.getName(), inode.getLocalName());
assertEquals(INodeFile.class, inode.getClass());
}
  /**
   * Test path resolution for a normal (non-snapshot) file.
   */
@Test (timeout=15000)
public void testNonSnapshotPathINodes() throws Exception {
// Get the inodes by resolving the path of a normal file
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// The number of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
// The returned nodesInPath should be non-snapshot
assertSnapshot(nodesInPath, false, null, -1);
// The last INode should be associated with file1
assertTrue("file1=" + file1 + ", nodesInPath=" + nodesInPath,
nodesInPath.getINode(components.length - 1) != null);
assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
file1.toString());
assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
sub1.toString());
assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
assertEquals(nodesInPath.length(), components.length);
assertSnapshot(nodesInPath, false, null, -1);
assertEquals(nodesInPath.getLastINode().getFullPathName(), file1.toString());
}
  /**
   * Test path resolution for a snapshot file.
   */
@Test (timeout=15000)
public void testSnapshotPathINodes() throws Exception {
// Create a snapshot for the dir, and check the inodes for the path
// pointing to a snapshot file
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1, "s1");
// The path when accessing the snapshot file of file1 is
// /TestSnapshot/sub1/.snapshot/s1/file1
String snapshotPath = sub1.toString() + "/.snapshot/s1/file1";
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
assertEquals(nodesInPath.length(), components.length - 1);
    // SnapshotRootIndex should be 3: {root, TestSnapshot, sub1, s1, file1}
final Snapshot snapshot = getSnapshot(nodesInPath, "s1", 3);
assertSnapshot(nodesInPath, true, snapshot, 3);
// Check the INode for file1 (snapshot file)
INode snapshotFileNode = nodesInPath.getLastINode();
assertINodeFile(snapshotFileNode, file1);
assertTrue(snapshotFileNode.getParent().isWithSnapshot());
// Call getExistingPathINodes and request only one INode.
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
assertEquals(nodesInPath.length(), components.length - 1);
assertSnapshot(nodesInPath, true, snapshot, 3);
// Check the INode for file1 (snapshot file)
assertINodeFile(nodesInPath.getLastINode(), file1);
// Resolve the path "/TestSnapshot/sub1/.snapshot"
String dotSnapshotPath = sub1.toString() + "/.snapshot";
names = INode.getPathNames(dotSnapshotPath);
components = INode.getPathComponents(names);
nodesInPath = INodesInPath.resolve(fsdir.rootDir, components, false);
// The number of INodes returned should still be components.length
// since we put a null in the inode array for ".snapshot"
assertEquals(nodesInPath.length(), components.length);
// No SnapshotRoot dir is included in the resolved inodes
assertSnapshot(nodesInPath, true, snapshot, -1);
// The last INode should be null, the last but 1 should be sub1
assertNull(nodesInPath.getLastINode());
assertEquals(nodesInPath.getINode(-2).getFullPathName(), sub1.toString());
assertTrue(nodesInPath.getINode(-2).isDirectory());
String[] invalidPathComponent = {"invalidDir", "foo", ".snapshot", "bar"};
Path invalidPath = new Path(invalidPathComponent[0]);
for(int i = 1; i < invalidPathComponent.length; i++) {
invalidPath = new Path(invalidPath, invalidPathComponent[i]);
try {
hdfs.getFileStatus(invalidPath);
Assert.fail();
} catch(FileNotFoundException fnfe) {
System.out.println("The exception is expected: " + fnfe);
}
}
hdfs.deleteSnapshot(sub1, "s1");
hdfs.disallowSnapshot(sub1);
}
  /**
   * Test path resolution for a snapshot file after the original file has
   * been deleted.
   */
@Test (timeout=15000)
public void testSnapshotPathINodesAfterDeletion() throws Exception {
// Create a snapshot for the dir, and check the inodes for the path
// pointing to a snapshot file
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1, "s2");
// Delete the original file /TestSnapshot/sub1/file1
hdfs.delete(file1, false);
final Snapshot snapshot;
{
// Resolve the path for the snapshot file
// /TestSnapshot/sub1/.snapshot/s2/file1
String snapshotPath = sub1.toString() + "/.snapshot/s2/file1";
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
assertEquals(nodesInPath.length(), components.length - 1);
      // SnapshotRootIndex should be 3: {root, TestSnapshot, sub1, s2, file1}
snapshot = getSnapshot(nodesInPath, "s2", 3);
assertSnapshot(nodesInPath, true, snapshot, 3);
// Check the INode for file1 (snapshot file)
final INode inode = nodesInPath.getLastINode();
assertEquals(file1.getName(), inode.getLocalName());
assertTrue(inode.asFile().isWithSnapshot());
}
// Check the INodes for path /TestSnapshot/sub1/file1
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// The length of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
// The number of non-null elements should be components.length - 1 since
// file1 has been deleted
assertEquals(getNumNonNull(nodesInPath), components.length - 1);
// The returned nodesInPath should be non-snapshot
assertSnapshot(nodesInPath, false, snapshot, -1);
// The last INode should be null, and the one before should be associated
// with sub1
assertNull(nodesInPath.getINode(components.length - 1));
assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
sub1.toString());
assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
hdfs.deleteSnapshot(sub1, "s2");
hdfs.disallowSnapshot(sub1);
}
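  /**
   * Return one plus the index of the last non-null inode in the resolved
   * path, i.e. the number of components that resolved, assuming nulls only
   * occur at the tail.
   */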
private int getNumNonNull(INodesInPath iip) {
List<INode> inodes = iip.getReadOnlyINodes();
for (int i = inodes.size() - 1; i >= 0; i--) {
if (inodes.get(i) != null) {
return i+1;
}
}
return 0;
}
  /**
   * Test path resolution for a snapshot while a new file is added after the
   * snapshot was taken.
   */
@Test (timeout=15000)
public void testSnapshotPathINodesWithAddedFile() throws Exception {
// Create a snapshot for the dir, and check the inodes for the path
// pointing to a snapshot file
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1, "s4");
// Add a new file /TestSnapshot/sub1/file3
final Path file3 = new Path(sub1, "file3");
DFSTestUtil.createFile(hdfs, file3, 1024, REPLICATION, seed);
Snapshot s4;
{
// Check the inodes for /TestSnapshot/sub1/.snapshot/s4/file3
String snapshotPath = sub1.toString() + "/.snapshot/s4/file3";
String[] names = INode.getPathNames(snapshotPath);
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// Length of inodes should be (components.length - 1), since we will ignore
// ".snapshot"
assertEquals(nodesInPath.length(), components.length - 1);
// The number of non-null inodes should be components.length - 2, since
// snapshot of file3 does not exist
assertEquals(getNumNonNull(nodesInPath), components.length - 2);
s4 = getSnapshot(nodesInPath, "s4", 3);
      // SnapshotRootIndex should still be 3: {root, TestSnapshot, sub1, s4, null}
assertSnapshot(nodesInPath, true, s4, 3);
// Check the last INode in inodes, which should be null
assertNull(nodesInPath.getINode(nodesInPath.length() - 1));
}
// Check the inodes for /TestSnapshot/sub1/file3
String[] names = INode.getPathNames(file3.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// The number of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
// The returned nodesInPath should be non-snapshot
assertSnapshot(nodesInPath, false, s4, -1);
// The last INode should be associated with file3
assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
file3.toString());
assertEquals(nodesInPath.getINode(components.length - 2).getFullPathName(),
sub1.toString());
assertEquals(nodesInPath.getINode(components.length - 3).getFullPathName(),
dir.toString());
hdfs.deleteSnapshot(sub1, "s4");
hdfs.disallowSnapshot(sub1);
}
  /**
   * Test path resolution for a snapshot file while the file is modified
   * after the snapshot was taken.
   */
@Test (timeout=15000)
public void testSnapshotPathINodesAfterModification() throws Exception {
// First check the INode for /TestSnapshot/sub1/file1
String[] names = INode.getPathNames(file1.toString());
byte[][] components = INode.getPathComponents(names);
INodesInPath nodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// The number of inodes should be equal to components.length
assertEquals(nodesInPath.length(), components.length);
// The last INode should be associated with file1
assertEquals(nodesInPath.getINode(components.length - 1).getFullPathName(),
file1.toString());
// record the modification time of the inode
final long modTime = nodesInPath.getINode(nodesInPath.length() - 1)
.getModificationTime();
// Create a snapshot for the dir, and check the inodes for the path
// pointing to a snapshot file
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1, "s3");
// Modify file1
DFSTestUtil.appendFile(hdfs, file1, "the content for appending");
// Check the INodes for snapshot of file1
String snapshotPath = sub1.toString() + "/.snapshot/s3/file1";
names = INode.getPathNames(snapshotPath);
components = INode.getPathComponents(names);
INodesInPath ssNodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
// Length of ssInodes should be (components.length - 1), since we will
// ignore ".snapshot"
assertEquals(ssNodesInPath.length(), components.length - 1);
final Snapshot s3 = getSnapshot(ssNodesInPath, "s3", 3);
assertSnapshot(ssNodesInPath, true, s3, 3);
// Check the INode for snapshot of file1
INode snapshotFileNode = ssNodesInPath.getLastINode();
assertEquals(snapshotFileNode.getLocalName(), file1.getName());
assertTrue(snapshotFileNode.asFile().isWithSnapshot());
// The modification time of the snapshot INode should be the same with the
// original INode before modification
assertEquals(modTime,
snapshotFileNode.getModificationTime(ssNodesInPath.getPathSnapshotId()));
// Check the INode for /TestSnapshot/sub1/file1 again
names = INode.getPathNames(file1.toString());
components = INode.getPathComponents(names);
INodesInPath newNodesInPath = INodesInPath.resolve(fsdir.rootDir,
components, false);
assertSnapshot(newNodesInPath, false, s3, -1);
// The number of inodes should be equal to components.length
assertEquals(newNodesInPath.length(), components.length);
// The last INode should be associated with file1
final int last = components.length - 1;
assertEquals(newNodesInPath.getINode(last).getFullPathName(),
file1.toString());
    // The modification time of the INode for file1 should have been changed
Assert.assertFalse(modTime == newNodesInPath.getINode(last).getModificationTime());
hdfs.deleteSnapshot(sub1, "s3");
hdfs.disallowSnapshot(sub1);
}
}
| 17,220
| 39.615566
| 87
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileLimit.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
/**
* This class tests that a file system adheres to the limit of
* maximum number of files that is configured.
*/
public class TestFileLimit {
static final long seed = 0xDEADBEEFL;
static final int blockSize = 8192;
boolean simulatedStorage = false;
  // Creates a small file containing 1024 random bytes.
private void createFile(FileSystem fileSys, Path name)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) 1, blockSize);
byte[] buffer = new byte[1024];
Random rand = new Random(seed);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
private void waitForLimit(FSNamesystem namesys, long num)
{
    // wait for the total object count (blocks + inodes) to reach num
while (true) {
long total = namesys.getBlocksTotal() + namesys.dir.totalInodes();
System.out.println("Comparing current nodes " + total +
" to become " + num);
if (total == num) {
break;
}
try {
Thread.sleep(1000);
} catch (InterruptedException e) {
}
}
}
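  // A bounded variant of waitForLimit, sketched here only as an illustration
  // (it is not used by the tests in this class): instead of spinning forever
  // when the namesystem never converges to the expected object count, it
  // gives up after the supplied timeout and reports the outcome.
  private boolean waitForLimitWithTimeout(FSNamesystem namesys, long num,
      long timeoutMillis) throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMillis;
    while (System.currentTimeMillis() < deadline) {
      // same convergence check as waitForLimit above
      if (namesys.getBlocksTotal() + namesys.dir.totalInodes() == num) {
        return true;
      }
      Thread.sleep(100);
    }
    return false;
  }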
  /**
   * Test that the namenode enforces the configured limit on the total
   * number of objects (inodes and blocks).
   */
@Test
public void testFileLimit() throws IOException {
Configuration conf = new HdfsConfiguration();
int maxObjects = 5;
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY, maxObjects);
conf.setLong(DFSConfigKeys.DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, 1000L);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
int currentNodes = 0;
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
FSNamesystem namesys = cluster.getNamesystem();
try {
//
// check that / exists
//
Path path = new Path("/");
assertTrue("/ should be a directory",
fs.getFileStatus(path).isDirectory());
currentNodes = 1; // root inode
      // verify that we can create files up to the limit. One object is
      // already used by "/"; each file takes two more (an inode and a block).
      //
for (int i = 0; i < maxObjects/2; i++) {
Path file = new Path("/filestatus" + i);
createFile(fs, file);
System.out.println("Created file " + file);
currentNodes += 2; // two more objects for this creation.
}
// verify that creating another file fails
boolean hitException = false;
try {
Path file = new Path("/filestatus");
createFile(fs, file);
System.out.println("Created file " + file);
} catch (IOException e) {
hitException = true;
}
assertTrue("Was able to exceed file limit", hitException);
// delete one file
Path file0 = new Path("/filestatus0");
fs.delete(file0, true);
System.out.println("Deleted file " + file0);
currentNodes -= 2;
// wait for number of blocks to decrease
waitForLimit(namesys, currentNodes);
      // now, we should be able to create a new file
createFile(fs, file0);
System.out.println("Created file " + file0 + " again.");
currentNodes += 2;
// delete the file again
file0 = new Path("/filestatus0");
fs.delete(file0, true);
System.out.println("Deleted file " + file0 + " again.");
currentNodes -= 2;
// wait for number of blocks to decrease
waitForLimit(namesys, currentNodes);
// create two directories in place of the file that we deleted
Path dir = new Path("/dir0/dir1");
fs.mkdirs(dir);
System.out.println("Created directories " + dir);
currentNodes += 2;
waitForLimit(namesys, currentNodes);
// verify that creating another directory fails
hitException = false;
try {
fs.mkdirs(new Path("dir.fail"));
System.out.println("Created directory should not have succeeded.");
} catch (IOException e) {
hitException = true;
}
assertTrue("Was able to exceed dir limit", hitException);
} finally {
fs.close();
cluster.shutdown();
}
}
@Test
public void testFileLimitSimulated() throws IOException {
simulatedStorage = true;
testFileLimit();
simulatedStorage = false;
}
@Test(timeout=60000)
public void testMaxBlocksPerFileLimit() throws Exception {
Configuration conf = new HdfsConfiguration();
// Make a small block size and a low limit
final long blockSize = 4096;
final long numBlocks = 2;
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, numBlocks);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
HdfsDataOutputStream fout =
(HdfsDataOutputStream)fs.create(new Path("/testmaxfilelimit"));
try {
// Write maximum number of blocks
fout.write(new byte[(int)blockSize*(int)numBlocks]);
fout.hflush();
// Try to write one more block
try {
fout.write(new byte[1]);
fout.hflush();
assert false : "Expected IOException after writing too many blocks";
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("File has reached the limit" +
" on maximum number of", e);
}
} finally {
cluster.shutdown();
}
}
@Test(timeout=60000)
public void testMinBlockSizeLimit() throws Exception {
final long blockSize = 4096;
Configuration conf = new HdfsConfiguration();
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, blockSize);
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
FileSystem fs = cluster.getFileSystem();
try {
// Try with min block size
fs.create(new Path("/testmblock1"), true, 4096, (short)3, blockSize);
try {
// Try with min block size - 1
fs.create(new Path("/testmblock2"), true, 4096, (short)3, blockSize-1);
assert false : "Expected IOException after creating a file with small" +
" blocks ";
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("Specified block size is less",
e);
}
} finally {
cluster.shutdown();
}
}
}
| 7,990
| 32.860169
| 80
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.MkdirOp;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.ha.EditLogTailer;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.HeartbeatResponse;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.ipc.StandbyException;
import org.apache.hadoop.security.AccessControlException;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
/**
* This is a utility class to expose NameNode functionality for unit tests.
*/
public class NameNodeAdapter {
/**
* Get the namesystem from the namenode
*/
public static FSNamesystem getNamesystem(NameNode namenode) {
return namenode.getNamesystem();
}
/**
* Get block locations within the specified range.
*/
public static LocatedBlocks getBlockLocations(NameNode namenode,
String src, long offset, long length) throws IOException {
return namenode.getNamesystem().getBlockLocations("foo",
src, offset, length);
}
public static HdfsFileStatus getFileInfo(NameNode namenode, String src,
boolean resolveLink) throws AccessControlException, UnresolvedLinkException,
StandbyException, IOException {
return FSDirStatAndListingOp.getFileInfo(namenode.getNamesystem()
.getFSDirectory(), src, resolveLink);
}
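  /** Create a directory directly through the namesystem, bypassing the RPC layer. */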
public static boolean mkdirs(NameNode namenode, String src,
PermissionStatus permissions, boolean createParent)
throws UnresolvedLinkException, IOException {
return namenode.getNamesystem().mkdirs(src, permissions, createParent);
}
public static void saveNamespace(NameNode namenode)
throws AccessControlException, IOException {
namenode.getNamesystem().saveNamespace();
}
public static void enterSafeMode(NameNode namenode, boolean resourcesLow)
throws IOException {
namenode.getNamesystem().enterSafeMode(resourcesLow);
}
public static void leaveSafeMode(NameNode namenode) {
namenode.getNamesystem().leaveSafeMode();
}
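  /** Abort the current in-progress edit log segment on the given NameNode. */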
public static void abortEditLogs(NameNode nn) {
FSEditLog el = nn.getFSImage().getEditLog();
el.abortCurrentLogSegment();
}
/**
* Get the internal RPC server instance.
* @return rpc server
*/
public static Server getRpcServer(NameNode namenode) {
return ((NameNodeRpcServer)namenode.getRpcServer()).clientRpcServer;
}
public static DelegationTokenSecretManager getDtSecretManager(
final FSNamesystem ns) {
return ns.getDelegationTokenSecretManager();
}
public static HeartbeatResponse sendHeartBeat(DatanodeRegistration nodeReg,
DatanodeDescriptor dd, FSNamesystem namesystem) throws IOException {
return namesystem.handleHeartbeat(nodeReg,
BlockManagerTestUtil.getStorageReportsForDatanode(dd),
dd.getCacheCapacity(), dd.getCacheRemaining(), 0, 0, 0, null, true);
}
public static boolean setReplication(final FSNamesystem ns,
final String src, final short replication) throws IOException {
return ns.setReplication(src, replication);
}
public static LeaseManager getLeaseManager(final FSNamesystem ns) {
return ns.leaseManager;
}
/** Set the softLimit and hardLimit of client lease periods. */
public static void setLeasePeriod(final FSNamesystem namesystem, long soft, long hard) {
getLeaseManager(namesystem).setLeasePeriod(soft, hard);
namesystem.leaseManager.triggerMonitorCheckNow();
}
public static Lease getLeaseForPath(NameNode nn, String path) {
final FSNamesystem fsn = nn.getNamesystem();
INode inode;
try {
inode = fsn.getFSDirectory().getINode(path, false);
} catch (UnresolvedLinkException e) {
      throw new RuntimeException("Lease manager should not support symlinks", e);
}
return inode == null ? null : fsn.leaseManager.getLease((INodeFile) inode);
}
public static String getLeaseHolderForPath(NameNode namenode, String path) {
Lease l = getLeaseForPath(namenode, path);
return l == null? null: l.getHolder();
}
/**
* @return the timestamp of the last renewal of the given lease,
* or -1 in the case that the lease doesn't exist.
*/
public static long getLeaseRenewalTime(NameNode nn, String path) {
Lease l = getLeaseForPath(nn, path);
return l == null ? -1 : l.getLastUpdate();
}
/**
* Return the datanode descriptor for the given datanode.
*/
public static DatanodeDescriptor getDatanode(final FSNamesystem ns,
DatanodeID id) throws IOException {
ns.readLock();
try {
return ns.getBlockManager().getDatanodeManager().getDatanode(id);
} finally {
ns.readUnlock();
}
}
/**
* Return the FSNamesystem stats
*/
public static long[] getStats(final FSNamesystem fsn) {
return fsn.getStats();
}
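  /**
   * Replace the namesystem's internal lock with a Mockito spy, so tests can
   * verify or stub lock operations.
   */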
public static ReentrantReadWriteLock spyOnFsLock(FSNamesystem fsn) {
ReentrantReadWriteLock spy = Mockito.spy(fsn.getFsLockForTests());
fsn.setFsLockForTests(spy);
return spy;
}
public static FSImage spyOnFsImage(NameNode nn1) {
FSNamesystem fsn = nn1.getNamesystem();
FSImage spy = Mockito.spy(fsn.getFSImage());
Whitebox.setInternalState(fsn, "fsImage", spy);
return spy;
}
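  /**
   * Replace the namesystem's edit log with a Mockito spy; if an edit log
   * tailer is running (standby NN), it is pointed at the spy as well.
   */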
public static FSEditLog spyOnEditLog(NameNode nn) {
FSEditLog spyEditLog = spy(nn.getNamesystem().getFSImage().getEditLog());
DFSTestUtil.setEditLogForTesting(nn.getNamesystem(), spyEditLog);
EditLogTailer tailer = nn.getNamesystem().getEditLogTailer();
if (tailer != null) {
tailer.setEditLog(spyEditLog);
}
return spyEditLog;
}
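  /** Replace the edit log's JournalSet with a Mockito spy and return it. */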
public static JournalSet spyOnJournalSet(NameNode nn) {
FSEditLog editLog = nn.getFSImage().getEditLog();
JournalSet js = Mockito.spy(editLog.getJournalSet());
editLog.setJournalSetForTesting(js);
return js;
}
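  /** @return the path of the given op if it is a mkdir op, otherwise null. */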
public static String getMkdirOpPath(FSEditLogOp op) {
if (op.opCode == FSEditLogOpCodes.OP_MKDIR) {
return ((MkdirOp) op).path;
} else {
return null;
}
}
public static FSEditLogOp createMkdirOp(String path) {
MkdirOp op = MkdirOp.getInstance(new FSEditLogOp.OpInstanceCache())
.setPath(path)
.setTimestamp(0)
.setPermissionStatus(new PermissionStatus(
"testuser", "testgroup", FsPermission.getDefault()));
return op;
}
/**
* @return the number of blocks marked safe by safemode, or -1
* if safemode is not running.
*/
public static int getSafeModeSafeBlocks(NameNode nn) {
SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
if (smi == null) {
return -1;
}
return smi.blockSafe;
}
/**
* @return Replication queue initialization status
*/
public static boolean safeModeInitializedReplQueues(NameNode nn) {
return nn.getNamesystem().isPopulatingReplQueues();
}
public static File getInProgressEditsFile(StorageDirectory sd, long startTxId) {
return NNStorage.getInProgressEditsFile(sd, startTxId);
}
public static NamenodeCommand startCheckpoint(NameNode nn,
NamenodeRegistration backupNode, NamenodeRegistration activeNamenode)
throws IOException {
return nn.getNamesystem().startCheckpoint(backupNode, activeNamenode);
}
}
file_length: 9,212 | avg_line_length: 34.434615 | max_line_length: 90 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.test.PathUtils;
import org.junit.Before;
import org.junit.Test;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
/**
* This class tests various combinations of dfs.namenode.name.dir
* and dfs.namenode.edits.dir configurations.
*/
public class TestNameEditsConfigs {
private static final Log LOG = LogFactory.getLog(FSEditLog.class);
static final long SEED = 0xDEADBEEFL;
static final int BLOCK_SIZE = 4096;
static final int FILE_SIZE = 8192;
static final int NUM_DATA_NODES = 3;
static final String FILE_IMAGE = "current/fsimage";
static final String FILE_EDITS = "current/edits";
short replication = 3;
private final File base_dir = new File(
PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs");
@Before
public void setUp() throws IOException {
if(base_dir.exists() && !FileUtil.fullyDelete(base_dir)) {
throw new IOException("Cannot remove directory " + base_dir);
}
}
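  /**
   * Write FILE_SIZE bytes of seeded pseudo-random data to the given path
   * with the given replication factor.
   */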
private void writeFile(FileSystem fileSys, Path name, int repl)
throws IOException {
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, BLOCK_SIZE);
byte[] buffer = new byte[FILE_SIZE];
Random rand = new Random(SEED);
rand.nextBytes(buffer);
stm.write(buffer);
stm.close();
}
void checkImageAndEditsFilesExistence(File dir,
boolean shouldHaveImages,
boolean shouldHaveEdits)
throws IOException {
FSImageTransactionalStorageInspector ins = inspect(dir);
if (shouldHaveImages) {
assertTrue("Expect images in " + dir, ins.foundImages.size() > 0);
} else {
assertTrue("Expect no images in " + dir, ins.foundImages.isEmpty());
}
List<FileJournalManager.EditLogFile> editlogs
= FileJournalManager.matchEditLogs(new File(dir, "current").listFiles());
if (shouldHaveEdits) {
assertTrue("Expect edits in " + dir, editlogs.size() > 0);
} else {
assertTrue("Expect no edits in " + dir, editlogs.isEmpty());
}
}
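  /** Verify that the file exists with the expected replication factor and size. */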
private void checkFile(FileSystem fileSys, Path name, int repl)
throws IOException {
assertTrue(fileSys.exists(name));
int replication = fileSys.getFileStatus(name).getReplication();
assertEquals("replication for " + name, repl, replication);
long size = fileSys.getContentSummary(name).getLength();
assertEquals("file size for " + name, size, FILE_SIZE);
}
private void cleanupFile(FileSystem fileSys, Path name)
throws IOException {
assertTrue(fileSys.exists(name));
fileSys.delete(name, true);
    assertFalse(fileSys.exists(name));
}
  SecondaryNameNode startSecondaryNameNode(Configuration conf)
      throws IOException {
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
return new SecondaryNameNode(conf);
}
/**
* Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
* The test creates files and restarts cluster with different configs.
* 1. Starts cluster with shared name and edits dirs
* 2. Restarts cluster by adding additional (different) name and edits dirs
   * 3. Restarts cluster by removing the shared name and edits dirs, so that
   *    the namenode starts using only the separate name and edits dirs
* 4. Restart cluster by adding shared directory again, but make sure we
* do not read any stale image or edits.
   * All along the test, we create and delete files at each restart to make
   * sure we are reading the proper edits and image.
* @throws Exception
*/
@Test
public void testNameEditsConfigs() throws Exception {
Path file1 = new Path("TestNameEditsConfigs1");
Path file2 = new Path("TestNameEditsConfigs2");
Path file3 = new Path("TestNameEditsConfigs3");
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = null;
FileSystem fileSys = null;
final File newNameDir = new File(base_dir, "name");
final File newEditsDir = new File(base_dir, "edits");
final File nameAndEdits = new File(base_dir, "name_and_edits");
final File checkpointNameDir = new File(base_dir, "secondname");
final File checkpointEditsDir = new File(base_dir, "secondedits");
final File checkpointNameAndEdits = new File(base_dir, "second_name_and_edits");
ImmutableList<File> allCurrentDirs = ImmutableList.of(
new File(nameAndEdits, "current"),
new File(newNameDir, "current"),
new File(newEditsDir, "current"),
new File(checkpointNameAndEdits, "current"),
new File(checkpointNameDir, "current"),
new File(checkpointEditsDir, "current"));
ImmutableList<File> imageCurrentDirs = ImmutableList.of(
new File(nameAndEdits, "current"),
new File(newNameDir, "current"),
new File(checkpointNameAndEdits, "current"),
new File(checkpointNameDir, "current"));
// Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointNameAndEdits.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
// Manage our own dfs directories
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.manageNameDfsDirs(false).build();
cluster.waitActive();
secondary = startSecondaryNameNode(conf);
fileSys = cluster.getFileSystem();
try {
      assertFalse(fileSys.exists(file1));
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
secondary.doCheckpoint();
} finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
assertTrue(newNameDir.mkdir());
assertTrue(newEditsDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() +
"," + newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() +
"," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() +
"," + checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() +
"," + checkpointNameAndEdits.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
// Manage our own dfs directories. Do not format.
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
secondary = startSecondaryNameNode(conf);
fileSys = cluster.getFileSystem();
try {
assertTrue(fileSys.exists(file1));
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
writeFile(fileSys, file2, replication);
checkFile(fileSys, file2, replication);
secondary.doCheckpoint();
} finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
FSImageTestUtil.assertParallelFilesAreIdentical(allCurrentDirs,
ImmutableSet.of("VERSION"));
FSImageTestUtil.assertSameNewestImage(imageCurrentDirs);
// Now remove common directory both have and start namenode with
// separate name and edits dirs
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, newNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
secondary = startSecondaryNameNode(conf);
fileSys = cluster.getFileSystem();
try {
      assertFalse(fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys, file2, replication);
cleanupFile(fileSys, file2);
writeFile(fileSys, file3, replication);
checkFile(fileSys, file3, replication);
secondary.doCheckpoint();
} finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
// No edit logs in new name dir
checkImageAndEditsFilesExistence(newNameDir, true, false);
checkImageAndEditsFilesExistence(newEditsDir, false, true);
checkImageAndEditsFilesExistence(checkpointNameDir, true, false);
checkImageAndEditsFilesExistence(checkpointEditsDir, false, true);
// Add old name_and_edits dir. File system should not read image or edits
// from old dir
assertTrue(FileUtil.fullyDelete(new File(nameAndEdits, "current")));
assertTrue(FileUtil.fullyDelete(new File(checkpointNameAndEdits, "current")));
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEdits.getPath() +
"," + newNameDir.getPath());
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEdits.getPath() +
        "," + newEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, checkpointNameDir.getPath() +
"," + checkpointNameAndEdits.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, checkpointEditsDir.getPath() +
"," + checkpointNameAndEdits.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
secondary = startSecondaryNameNode(conf);
fileSys = cluster.getFileSystem();
try {
      assertFalse(fileSys.exists(file1));
      assertFalse(fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys, file3, replication);
secondary.doCheckpoint();
} finally {
fileSys.close();
cluster.shutdown();
secondary.shutdown();
}
checkImageAndEditsFilesExistence(nameAndEdits, true, true);
checkImageAndEditsFilesExistence(checkpointNameAndEdits, true, true);
}
private FSImageTransactionalStorageInspector inspect(File storageDir)
throws IOException {
return FSImageTestUtil.inspectStorageDirectory(
new File(storageDir, "current"), NameNodeDirType.IMAGE_AND_EDITS);
}
/**
* Test edits.dir.required configuration options.
* 1. Directory present in dfs.namenode.edits.dir.required but not in
* dfs.namenode.edits.dir. Expected to fail.
* 2. Directory present in both dfs.namenode.edits.dir.required and
* dfs.namenode.edits.dir. Expected to succeed.
* 3. Directory present only in dfs.namenode.edits.dir. Expected to
* succeed.
*/
@Test
public void testNameEditsRequiredConfigs() throws IOException {
MiniDFSCluster cluster = null;
File nameAndEditsDir = new File(base_dir, "name_and_edits");
File nameAndEditsDir2 = new File(base_dir, "name_and_edits2");
File nameDir = new File(base_dir, "name");
// 1
// Bad configuration. Add a directory to dfs.namenode.edits.dir.required
// without adding it to dfs.namenode.edits.dir.
try {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
conf.set(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
nameAndEditsDir2.toURI().toString());
conf.set(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
nameAndEditsDir.toURI().toString());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.manageNameDfsDirs(false)
.build();
fail("Successfully started cluster but should not have been able to.");
} catch (IllegalArgumentException iae) { // expect to fail
LOG.info("EXPECTED: cluster start failed due to bad configuration" + iae);
} finally {
if (cluster != null) {
cluster.shutdown();
}
cluster = null;
}
// 2
// Good configuration. Add a directory to both dfs.namenode.edits.dir.required
// and dfs.namenode.edits.dir.
try {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
conf.setStrings(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
nameAndEditsDir.toURI().toString(),
nameAndEditsDir2.toURI().toString());
conf.set(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_REQUIRED_KEY,
nameAndEditsDir2.toURI().toString());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.manageNameDfsDirs(false)
.build();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
// 3
// Good configuration. Adds a directory to dfs.namenode.edits.dir but not to
// dfs.namenode.edits.dir.required.
try {
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
conf.setStrings(
DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY,
nameAndEditsDir.toURI().toString(),
nameAndEditsDir2.toURI().toString());
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.manageNameDfsDirs(false)
.build();
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test various configuration options of dfs.namenode.name.dir and dfs.namenode.edits.dir
* This test tries to simulate failure scenarios.
* 1. Start cluster with shared name and edits dir
* 2. Restart cluster by adding separate name and edits dirs
* 3. Restart cluster by removing shared name and edits dir
* 4. Restart cluster with old shared name and edits dir, but only latest
* name dir. This should fail since we don't have latest edits dir
* 5. Restart cluster with old shared name and edits dir, but only latest
* edits dir. This should succeed since the latest edits will have
* segments leading all the way from the image in name_and_edits.
*/
@Test
public void testNameEditsConfigsFailure() throws IOException {
Path file1 = new Path("TestNameEditsConfigs1");
Path file2 = new Path("TestNameEditsConfigs2");
Path file3 = new Path("TestNameEditsConfigs3");
MiniDFSCluster cluster = null;
Configuration conf = null;
FileSystem fileSys = null;
File nameOnlyDir = new File(base_dir, "name");
File editsOnlyDir = new File(base_dir, "edits");
File nameAndEditsDir = new File(base_dir, "name_and_edits");
// 1
// Start namenode with same dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
// Manage our own dfs directories
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
// Check that the dir has a VERSION file
assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
fileSys = cluster.getFileSystem();
      assertFalse(fileSys.exists(file1));
writeFile(fileSys, file1, replication);
checkFile(fileSys, file1, replication);
} finally {
fileSys.close();
cluster.shutdown();
}
// 2
// Start namenode with additional dfs.namenode.name.dir and dfs.namenode.edits.dir
conf = new HdfsConfiguration();
assertTrue(nameOnlyDir.mkdir());
assertTrue(editsOnlyDir.mkdir());
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath() +
"," + nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath() +
"," + editsOnlyDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
// Manage our own dfs directories. Do not format.
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
// Check that the dirs have a VERSION file
assertTrue(new File(nameAndEditsDir, "current/VERSION").exists());
assertTrue(new File(nameOnlyDir, "current/VERSION").exists());
assertTrue(new File(editsOnlyDir, "current/VERSION").exists());
fileSys = cluster.getFileSystem();
assertTrue(fileSys.exists(file1));
checkFile(fileSys, file1, replication);
cleanupFile(fileSys, file1);
writeFile(fileSys, file2, replication);
checkFile(fileSys, file2, replication);
} finally {
fileSys.close();
cluster.shutdown();
}
// 3
// Now remove common directory both have and start namenode with
// separate name and edits dirs
try {
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
assertFalse(fileSys.exists(file1));
assertTrue(fileSys.exists(file2));
checkFile(fileSys, file2, replication);
cleanupFile(fileSys, file2);
writeFile(fileSys, file3, replication);
checkFile(fileSys, file3, replication);
} finally {
fileSys.close();
cluster.shutdown();
}
// 4
// Add old shared directory for name and edits along with latest name
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameOnlyDir.getPath() + "," +
nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
fail("Successfully started cluster but should not have been able to.");
} catch (IOException e) { // expect to fail
LOG.info("EXPECTED: cluster start failed due to missing " +
"latest edits dir", e);
} finally {
if (cluster != null) {
cluster.shutdown();
}
cluster = null;
}
// 5
// Add old shared directory for name and edits along with latest edits.
// This is OK, since the latest edits will have segments leading all
// the way from the image in name_and_edits.
conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameAndEditsDir.getPath());
conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, editsOnlyDir.getPath() +
"," + nameAndEditsDir.getPath());
replication = (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY, 3);
try {
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(NUM_DATA_NODES)
.format(false)
.manageNameDfsDirs(false)
.build();
fileSys = cluster.getFileSystem();
assertFalse(fileSys.exists(file1));
assertFalse(fileSys.exists(file2));
assertTrue(fileSys.exists(file3));
checkFile(fileSys, file3, replication);
cleanupFile(fileSys, file3);
writeFile(fileSys, file3, replication);
checkFile(fileSys, file3, replication);
} finally {
fileSys.close();
cluster.shutdown();
}
}
/**
   * Test that dfs.namenode.checkpoint.dir and dfs.namenode.checkpoint.edits.dir
   * tolerate whitespace around their configured values.
*/
@Test
public void testCheckPointDirsAreTrimmed() throws Exception {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
File checkpointNameDir1 = new File(base_dir, "chkptName1");
File checkpointEditsDir1 = new File(base_dir, "chkptEdits1");
File checkpointNameDir2 = new File(base_dir, "chkptName2");
File checkpointEditsDir2 = new File(base_dir, "chkptEdits2");
File nameDir = new File(base_dir, "name1");
String whiteSpace = " \n \n ";
Configuration conf = new HdfsConfiguration();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getPath());
conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY, whiteSpace
+ checkpointNameDir1.getPath() + whiteSpace, whiteSpace
+ checkpointNameDir2.getPath() + whiteSpace);
conf.setStrings(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
whiteSpace + checkpointEditsDir1.getPath() + whiteSpace, whiteSpace
+ checkpointEditsDir2.getPath() + whiteSpace);
cluster = new MiniDFSCluster.Builder(conf).manageNameDfsDirs(false)
.numDataNodes(3).build();
try {
cluster.waitActive();
secondary = startSecondaryNameNode(conf);
secondary.doCheckpoint();
      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY
          + " must be trimmed", checkpointNameDir1.exists());
      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY
          + " must be trimmed", checkpointNameDir2.exists());
      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
          + " must be trimmed", checkpointEditsDir1.exists());
      assertTrue(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY
          + " must be trimmed", checkpointEditsDir2.exists());
} finally {
secondary.shutdown();
cluster.shutdown();
}
}
}
file_length: 25,581 | avg_line_length: 39.865815 | max_line_length: 100 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileChannel;
import java.util.HashMap;
import java.util.Map;
import java.util.SortedMap;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.PathUtils;
import org.apache.log4j.Level;
import org.junit.Test;
import com.google.common.collect.Maps;
import com.google.common.io.Files;
public class TestFSEditLogLoader {
static {
((Log4JLogger)FSImage.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL);
}
private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class);
private static final int NUM_DATA_NODES = 0;
@Test
public void testDisplayRecentEditLogOpCodes() throws IOException {
// start a cluster
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FileSystem fileSys = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
.enableManagedDfsDirsRedundancy(false).build();
cluster.waitActive();
fileSys = cluster.getFileSystem();
final FSNamesystem namesystem = cluster.getNamesystem();
FSImage fsimage = namesystem.getFSImage();
for (int i = 0; i < 20; i++) {
fileSys.mkdirs(new Path("/tmp/tmp" + i));
}
StorageDirectory sd = fsimage.getStorage().dirIterator(NameNodeDirType.EDITS).next();
cluster.shutdown();
File editFile = FSImageTestUtil.findLatestEditsLog(sd).getFile();
assertTrue("Should exist: " + editFile, editFile.exists());
// Corrupt the edits file.
long fileLen = editFile.length();
RandomAccessFile rwf = new RandomAccessFile(editFile, "rw");
rwf.seek(fileLen - 40);
for (int i = 0; i < 20; i++) {
rwf.write(FSEditLogOpCodes.OP_DELETE.getOpCode());
}
rwf.close();
StringBuilder bld = new StringBuilder();
bld.append("^Error replaying edit log at offset \\d+. ");
bld.append("Expected transaction ID was \\d+\n");
bld.append("Recent opcode offsets: (\\d+\\s*){4}$");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATA_NODES)
.enableManagedDfsDirsRedundancy(false).format(false).build();
fail("should not be able to start");
} catch (IOException e) {
assertTrue("error message contains opcodes message",
e.getMessage().matches(bld.toString()));
}
}
/**
* Test that, if the NN restarts with a new minimum replication,
* any files created with the old replication count will get
* automatically bumped up to the new minimum upon restart.
*/
@Test
public void testReplicationAdjusted() throws Exception {
// start a cluster
Configuration conf = new HdfsConfiguration();
// Replicate and heartbeat fast to shave a few seconds off test
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.build();
cluster.waitActive();
FileSystem fs = cluster.getFileSystem();
// Create a file with replication count 1
Path p = new Path("/testfile");
DFSTestUtil.createFile(fs, p, 10, /*repl*/ (short)1, 1);
DFSTestUtil.waitReplication(fs, p, (short)1);
// Shut down and restart cluster with new minimum replication of 2
cluster.shutdown();
cluster = null;
conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY, 2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2)
.format(false).build();
cluster.waitActive();
fs = cluster.getFileSystem();
// The file should get adjusted to replication 2 when
// the edit log is replayed.
DFSTestUtil.waitReplication(fs, p, (short)2);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Corrupt the byte at the given offset in the given file,
* by subtracting 1 from it.
*/
private void corruptByteInFile(File file, long offset)
throws IOException {
RandomAccessFile raf = new RandomAccessFile(file, "rw");
try {
raf.seek(offset);
int origByte = raf.read();
raf.seek(offset);
raf.writeByte(origByte - 1);
} finally {
IOUtils.closeStream(raf);
}
}
/**
* Truncate the given file to the given length
*/
private void truncateFile(File logFile, long newLength)
throws IOException {
RandomAccessFile raf = new RandomAccessFile(logFile, "rw");
raf.setLength(newLength);
raf.close();
}
/**
   * Return the number of bytes in the given file, excluding the trailing
   * run of 0xFF (OP_INVALID) bytes.
* This seeks to the end of the file and reads chunks backwards until
* it finds a non-0xFF byte.
* @throws IOException if the file cannot be read
*/
private static long getNonTrailerLength(File f) throws IOException {
final int chunkSizeToRead = 256*1024;
FileInputStream fis = new FileInputStream(f);
try {
byte buf[] = new byte[chunkSizeToRead];
FileChannel fc = fis.getChannel();
long size = fc.size();
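      // Start at the beginning of the last (possibly partial) chunk; if the
      // file size is an exact multiple of the chunk size, the first iteration
      // reads zero bytes and we simply step back one full chunk.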
long pos = size - (size % chunkSizeToRead);
while (pos >= 0) {
fc.position(pos);
int readLen = (int) Math.min(size - pos, chunkSizeToRead);
IOUtils.readFully(fis, buf, 0, readLen);
for (int i = readLen - 1; i >= 0; i--) {
if (buf[i] != FSEditLogOpCodes.OP_INVALID.getOpCode()) {
return pos + i + 1; // + 1 since we count this byte!
}
}
pos -= chunkSizeToRead;
}
return 0;
} finally {
fis.close();
}
}
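  /**
   * Test that PositionTrackingInputStream enforces its byte limit and honors
   * mark/reset for both single-byte and array reads.
   */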
@Test
public void testStreamLimiter() throws IOException {
final File LIMITER_TEST_FILE = new File(TEST_DIR, "limiter.test");
FileOutputStream fos = new FileOutputStream(LIMITER_TEST_FILE);
try {
fos.write(0x12);
fos.write(0x12);
fos.write(0x12);
} finally {
fos.close();
}
FileInputStream fin = new FileInputStream(LIMITER_TEST_FILE);
BufferedInputStream bin = new BufferedInputStream(fin);
FSEditLogLoader.PositionTrackingInputStream tracker =
new FSEditLogLoader.PositionTrackingInputStream(bin);
try {
tracker.setLimit(2);
tracker.mark(100);
tracker.read();
tracker.read();
try {
tracker.read();
fail("expected to get IOException after reading past the limit");
      } catch (IOException e) {
        // expected: single-byte read past the configured limit
      }
tracker.reset();
tracker.mark(100);
byte arr[] = new byte[3];
try {
tracker.read(arr);
fail("expected to get IOException after reading past the limit");
      } catch (IOException e) {
        // expected: array read past the configured limit
      }
tracker.reset();
arr = new byte[2];
tracker.read(arr);
} finally {
tracker.close();
}
}
/**
* Create an unfinalized edit log for testing purposes
*
* @param testDir Directory to create the edit log in
* @param numTx Number of transactions to add to the new edit log
   * @param offsetToTxId An output map, filled in with each transaction's
   *                     byte offset in the file mapped to its transaction ID.
   * @return The new in-progress edit log file.
* @throws IOException
*/
static private File prepareUnfinalizedTestEditLog(File testDir, int numTx,
SortedMap<Long, Long> offsetToTxId) throws IOException {
File inProgressFile = new File(testDir, NNStorage.getInProgressEditsFileName(1));
FSEditLog fsel = null, spyLog = null;
try {
fsel = FSImageTestUtil.createStandaloneEditLog(testDir);
spyLog = spy(fsel);
// Normally, the in-progress edit log would be finalized by
// FSEditLog#endCurrentLogSegment. For testing purposes, we
// disable that here.
doNothing().when(spyLog).endCurrentLogSegment(true);
spyLog.openForWrite(NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION);
assertTrue("should exist: " + inProgressFile, inProgressFile.exists());
for (int i = 0; i < numTx; i++) {
long trueOffset = getNonTrailerLength(inProgressFile);
long thisTxId = spyLog.getLastWrittenTxId() + 1;
offsetToTxId.put(trueOffset, thisTxId);
System.err.println("txid " + thisTxId + " at offset " + trueOffset);
spyLog.logDelete("path" + i, i, false);
spyLog.logSync();
}
} finally {
if (spyLog != null) {
spyLog.close();
} else if (fsel != null) {
fsel.close();
}
}
return inProgressFile;
}
@Test
public void testValidateEditLogWithCorruptHeader() throws IOException {
File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptHeader");
SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
File logFile = prepareUnfinalizedTestEditLog(testDir, 2, offsetToTxId);
RandomAccessFile rwf = new RandomAccessFile(logFile, "rw");
try {
rwf.seek(0);
rwf.writeLong(42); // corrupt header
} finally {
rwf.close();
}
EditLogValidation validation = EditLogFileInputStream.validateEditLog(logFile);
assertTrue(validation.hasCorruptHeader());
}
@Test
public void testValidateEditLogWithCorruptBody() throws IOException {
File testDir = new File(TEST_DIR, "testValidateEditLogWithCorruptBody");
SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
final int NUM_TXNS = 20;
File logFile = prepareUnfinalizedTestEditLog(testDir, NUM_TXNS,
offsetToTxId);
// Back up the uncorrupted log
File logFileBak = new File(testDir, logFile.getName() + ".bak");
Files.copy(logFile, logFileBak);
EditLogValidation validation =
EditLogFileInputStream.validateEditLog(logFile);
    assertFalse(validation.hasCorruptHeader());
// We expect that there will be an OP_START_LOG_SEGMENT, followed by
// NUM_TXNS opcodes, followed by an OP_END_LOG_SEGMENT.
assertEquals(NUM_TXNS + 1, validation.getEndTxId());
// Corrupt each edit and verify that validation continues to work
for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
long txOffset = entry.getKey();
long txId = entry.getValue();
// Restore backup, corrupt the txn opcode
Files.copy(logFileBak, logFile);
corruptByteInFile(logFile, txOffset);
validation = EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId = (txId == (NUM_TXNS + 1)) ?
NUM_TXNS : (NUM_TXNS + 1);
assertEquals("Failed when corrupting txn opcode at " + txOffset,
expectedEndTxId, validation.getEndTxId());
      assertFalse(validation.hasCorruptHeader());
}
// Truncate right before each edit and verify that validation continues
// to work
for (Map.Entry<Long, Long> entry : offsetToTxId.entrySet()) {
long txOffset = entry.getKey();
long txId = entry.getValue();
// Restore backup, corrupt the txn opcode
Files.copy(logFileBak, logFile);
truncateFile(logFile, txOffset);
validation = EditLogFileInputStream.validateEditLog(logFile);
long expectedEndTxId = (txId == 0) ?
HdfsServerConstants.INVALID_TXID : (txId - 1);
assertEquals("Failed when corrupting txid " + txId + " txn opcode " +
"at " + txOffset, expectedEndTxId, validation.getEndTxId());
      assertFalse(validation.hasCorruptHeader());
}
}
@Test
public void testValidateEmptyEditLog() throws IOException {
File testDir = new File(TEST_DIR, "testValidateEmptyEditLog");
SortedMap<Long, Long> offsetToTxId = Maps.newTreeMap();
File logFile = prepareUnfinalizedTestEditLog(testDir, 0, offsetToTxId);
// Truncate the file so that there is nothing except the header and
// layout flags section.
truncateFile(logFile, 8);
EditLogValidation validation =
EditLogFileInputStream.validateEditLog(logFile);
    assertFalse(validation.hasCorruptHeader());
assertEquals(HdfsServerConstants.INVALID_TXID, validation.getEndTxId());
}
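  // Reverse mapping from opcode byte values to enum constants, built
  // independently so testFSEditLogOpCodes can cross-check
  // FSEditLogOpCodes.fromByte.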
private static final Map<Byte, FSEditLogOpCodes> byteToEnum =
new HashMap<Byte, FSEditLogOpCodes>();
static {
for(FSEditLogOpCodes opCode : FSEditLogOpCodes.values()) {
byteToEnum.put(opCode.getOpCode(), opCode);
}
}
private static FSEditLogOpCodes fromByte(byte opCode) {
return byteToEnum.get(opCode);
}
@Test
public void testFSEditLogOpCodes() throws IOException {
//try all codes
for(FSEditLogOpCodes c : FSEditLogOpCodes.values()) {
final byte code = c.getOpCode();
assertEquals("c=" + c + ", code=" + code,
c, FSEditLogOpCodes.fromByte(code));
}
//try all byte values
for(int b = 0; b < (1 << Byte.SIZE); b++) {
final byte code = (byte)b;
assertEquals("b=" + b + ", code=" + code,
fromByte(code), FSEditLogOpCodes.fromByte(code));
}
}
}
file_length: 14,858 | avg_line_length: 34.547847 | max_line_length: 89 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FileNameGenerator.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.util.Arrays;
/**
* File name generator.
*
* Each directory contains not more than a fixed number (filesPerDir)
* of files and directories.
* When the number of files in one directory reaches the maximum,
* the generator creates a new directory and proceeds generating files in it.
 * The generated namespace tree is balanced: the length of any path to a
 * leaf file is not less than the height of the tree minus one.
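 *
 * For example, with filesPerDir = 2, base directory "base" and file name
 * prefix "f", the generated names are: base/fDir0/f0, base/fDir0/f1,
 * base/fDir1/f2, base/fDir1/f3, base/fDir0/fDir0/f4, and so on.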
*/
public class FileNameGenerator {
private static final int DEFAULT_FILES_PER_DIRECTORY = 32;
  private final int[] pathIndecies = new int[20]; // supports up to 32**20 = 2**100 (roughly 1.3 * 10**30) files
private final String baseDir;
private String currentDir;
private final int filesPerDirectory;
private long fileCount;
FileNameGenerator(String baseDir) {
this(baseDir, DEFAULT_FILES_PER_DIRECTORY);
}
FileNameGenerator(String baseDir, int filesPerDir) {
this.baseDir = baseDir;
this.filesPerDirectory = filesPerDir;
reset();
}
String getNextDirName(String prefix) {
int depth = 0;
while(pathIndecies[depth] >= 0)
depth++;
int level;
for(level = depth-1;
level >= 0 && pathIndecies[level] == filesPerDirectory-1; level--)
pathIndecies[level] = 0;
if(level < 0)
pathIndecies[depth] = 0;
else
pathIndecies[level]++;
level = 0;
String next = baseDir;
while(pathIndecies[level] >= 0)
next = next + "/" + prefix + pathIndecies[level++];
return next;
}
synchronized String getNextFileName(String fileNamePrefix) {
long fNum = fileCount % filesPerDirectory;
if(fNum == 0) {
currentDir = getNextDirName(fileNamePrefix + "Dir");
}
String fn = currentDir + "/" + fileNamePrefix + fileCount;
fileCount++;
return fn;
}
private synchronized void reset() {
Arrays.fill(pathIndecies, -1);
fileCount = 0L;
currentDir = "";
}
synchronized int getFilesPerDirectory() {
return filesPerDirectory;
}
synchronized String getCurrentDir() {
return currentDir;
}
}
file_length: 2,926 | avg_line_length: 30.138298 | max_line_length: 107 | extension_type: java

repo: hadoop | file: hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSAclBaseTest.java
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AclException;
import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
/**
* Tests NameNode interaction for all ACL modification APIs. This test suite
* also covers interaction of setPermission with inodes that have ACLs.
*/
public abstract class FSAclBaseTest {
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { });
private static final UserGroupInformation SUPERGROUP_MEMBER =
UserGroupInformation.createUserForTesting("super", new String[] {
DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT });
// group member
private static final UserGroupInformation BOB = UserGroupInformation
.createUserForTesting("bob", new String[] { "groupY", "groupZ" });
protected static MiniDFSCluster cluster;
protected static Configuration conf;
private static int pathCount = 0;
private static Path path;
@Rule
public ExpectedException exception = ExpectedException.none();
private FileSystem fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember, fsAsBob;
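  /**
   * Start a MiniDFSCluster with ACLs enabled, using the configuration
   * supplied by the concrete test subclass.
   */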
protected static void startCluster() throws IOException {
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
}
@AfterClass
public static void shutdown() {
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() throws Exception {
pathCount += 1;
path = new Path("/p" + pathCount);
initFileSystems();
}
@After
public void destroyFileSystems() {
    IOUtils.cleanup(null, fs, fsAsBruce, fsAsDiana, fsAsSupergroupMember,
        fsAsBob);
    fs = fsAsBruce = fsAsDiana = fsAsSupergroupMember = fsAsBob = null;
}
@Test
public void testModifyAclEntries() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testModifyAclEntriesOnlyAccess() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", READ_EXECUTE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testModifyAclEntriesOnlyDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testModifyAclEntriesMinimal() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", READ_WRITE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_WRITE),
aclEntry(ACCESS, GROUP, READ) }, returned);
assertPermission((short)010660);
assertAclFeature(true);
}
@Test
public void testModifyAclEntriesMinimalDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testModifyAclEntriesCustomMask() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, MASK, NONE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ) }, returned);
assertPermission((short)010600);
assertAclFeature(true);
}
@Test
public void testModifyAclEntriesStickyBit() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE));
fs.modifyAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ_EXECUTE),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)011750);
assertAclFeature(true);
}
@Test(expected=FileNotFoundException.class)
public void testModifyAclEntriesPathNotFound() throws IOException {
// Path has not been created.
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE));
fs.modifyAclEntries(path, aclSpec);
}
@Test(expected=AclException.class)
public void testModifyAclEntriesDefaultOnFile() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.modifyAclEntries(path, aclSpec);
}
@Test
public void testRemoveAclEntries() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo"),
aclEntry(DEFAULT, USER, "foo"));
fs.removeAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testRemoveAclEntriesOnlyAccess() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, USER, "bar", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo"));
fs.removeAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bar", READ_WRITE),
aclEntry(ACCESS, GROUP, READ_WRITE) }, returned);
assertPermission((short)010760);
assertAclFeature(true);
}
@Test
public void testRemoveAclEntriesOnlyDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, USER, "bar", READ_EXECUTE));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo"));
fs.removeAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bar", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testRemoveAclEntriesMinimal() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0760));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_WRITE),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo"),
aclEntry(ACCESS, MASK));
fs.removeAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0760);
assertAclFeature(false);
}
@Test
public void testRemoveAclEntriesMinimalDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo"),
aclEntry(ACCESS, MASK),
aclEntry(DEFAULT, USER, "foo"),
aclEntry(DEFAULT, MASK));
fs.removeAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testRemoveAclEntriesStickyBit() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo"),
aclEntry(DEFAULT, USER, "foo"));
fs.removeAclEntries(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)011750);
assertAclFeature(true);
}
@Test(expected=FileNotFoundException.class)
public void testRemoveAclEntriesPathNotFound() throws IOException {
// Path has not been created.
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo"));
fs.removeAclEntries(path, aclSpec);
}
@Test
public void testRemoveDefaultAcl() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.removeDefaultAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
assertPermission((short)010770);
assertAclFeature(true);
// restart of the cluster
restartCluster();
s = fs.getAclStatus(path);
AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(returned, afterRestart);
}
@Test
public void testRemoveDefaultAclOnlyAccess() throws Exception {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
fs.removeDefaultAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
assertPermission((short)010770);
assertAclFeature(true);
// restart of the cluster
restartCluster();
s = fs.getAclStatus(path);
AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(returned, afterRestart);
}
@Test
public void testRemoveDefaultAclOnlyDefault() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.removeDefaultAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0750);
assertAclFeature(false);
// restart of the cluster
restartCluster();
s = fs.getAclStatus(path);
AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(returned, afterRestart);
}
@Test
public void testRemoveDefaultAclMinimal() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.removeDefaultAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0750);
assertAclFeature(false);
// restart of the cluster
restartCluster();
s = fs.getAclStatus(path);
AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(returned, afterRestart);
}
@Test
public void testRemoveDefaultAclStickyBit() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.removeDefaultAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
assertPermission((short)011770);
assertAclFeature(true);
// restart of the cluster
restartCluster();
s = fs.getAclStatus(path);
AclEntry[] afterRestart = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(returned, afterRestart);
}
@Test(expected=FileNotFoundException.class)
public void testRemoveDefaultAclPathNotFound() throws IOException {
// Path has not been created.
fs.removeDefaultAcl(path);
}
@Test
public void testRemoveAcl() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.removeAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0750);
assertAclFeature(false);
}
@Test
public void testRemoveAclMinimalAcl() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
fs.removeAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0640);
assertAclFeature(false);
}
@Test
public void testRemoveAclStickyBit() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.removeAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)01750);
assertAclFeature(false);
}
@Test
public void testRemoveAclOnlyDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.removeAcl(path);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0750);
assertAclFeature(false);
}
@Test(expected=FileNotFoundException.class)
public void testRemoveAclPathNotFound() throws IOException {
// Path has not been created.
fs.removeAcl(path);
}
@Test
public void testSetAcl() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010770);
assertAclFeature(true);
}
@Test
public void testSetAclOnlyAccess() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) }, returned);
assertPermission((short)010640);
assertAclFeature(true);
}
@Test
public void testSetAclOnlyDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testSetAclMinimal() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0644));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0640);
assertAclFeature(false);
}
@Test
public void testSetAclMinimalDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010750);
assertAclFeature(true);
}
@Test
public void testSetAclCustomMask() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, MASK, ALL),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) }, returned);
assertPermission((short)010670);
assertAclFeature(true);
}
@Test
public void testSetAclStickyBit() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)01750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)011770);
assertAclFeature(true);
}
@Test(expected=FileNotFoundException.class)
public void testSetAclPathNotFound() throws IOException {
// Path has not been created.
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
}
@Test(expected=AclException.class)
public void testSetAclDefaultOnFile() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
}
@Test
public void testSetPermission() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.setPermission(path, FsPermission.createImmutable((short)0700));
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010700);
assertAclFeature(true);
}
@Test
public void testSetPermissionOnlyAccess() throws IOException {
fs.create(path).close();
fs.setPermission(path, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, NONE));
fs.setAcl(path, aclSpec);
fs.setPermission(path, FsPermission.createImmutable((short)0600));
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", READ),
aclEntry(ACCESS, GROUP, READ) }, returned);
assertPermission((short)010600);
assertAclFeature(true);
}
@Test
public void testSetPermissionOnlyDefault() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(ACCESS, OTHER, NONE),
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
fs.setPermission(path, FsPermission.createImmutable((short)0700));
AclStatus s = fs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010700);
assertAclFeature(true);
}
@Test
public void testSetPermissionCannotSetAclBit() throws IOException {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
fs.setPermission(path, FsPermission.createImmutable((short)0700));
assertPermission((short)0700);
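    // Attempt to smuggle in the ACL and encryption bits through an extended
    // permission; setPermission must keep only the ordinary mode bits.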
    fs.setPermission(path, new FsPermissionExtension(
        FsPermission.createImmutable((short)0755), true, true));
INode inode = cluster.getNamesystem().getFSDirectory().getINode(
path.toUri().getPath(), false);
assertNotNull(inode);
FsPermission perm = inode.getFsPermission();
assertNotNull(perm);
assertEquals(0755, perm.toShort());
assertEquals(0755, perm.toExtendedShort());
assertAclFeature(false);
}
@Test
public void testDefaultAclNewFile() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path filePath = new Path(path, "file1");
fs.create(filePath).close();
AclStatus s = fs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
assertPermission(filePath, (short)010640);
assertAclFeature(filePath, true);
}
@Test
public void testOnlyAccessAclNewFile() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", ALL));
fs.modifyAclEntries(path, aclSpec);
Path filePath = new Path(path, "file1");
fs.create(filePath).close();
AclStatus s = fs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission(filePath, (short)0644);
assertAclFeature(filePath, false);
}
@Test
public void testDefaultMinimalAclNewFile() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE));
fs.setAcl(path, aclSpec);
Path filePath = new Path(path, "file1");
fs.create(filePath).close();
AclStatus s = fs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission(filePath, (short)0640);
assertAclFeature(filePath, false);
}
@Test
public void testDefaultAclNewDir() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path dirPath = new Path(path, "dir1");
fs.mkdirs(dirPath);
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission(dirPath, (short)010750);
assertAclFeature(dirPath, true);
}
@Test
public void testOnlyAccessAclNewDir() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "foo", ALL));
fs.modifyAclEntries(path, aclSpec);
Path dirPath = new Path(path, "dir1");
fs.mkdirs(dirPath);
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission(dirPath, (short)0755);
assertAclFeature(dirPath, false);
}
@Test
public void testDefaultMinimalAclNewDir() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE));
fs.setAcl(path, aclSpec);
Path dirPath = new Path(path, "dir1");
fs.mkdirs(dirPath);
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission(dirPath, (short)010750);
assertAclFeature(dirPath, true);
}
@Test
public void testDefaultAclNewFileIntermediate() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path dirPath = new Path(path, "dir1");
Path filePath = new Path(dirPath, "file1");
fs.create(filePath).close();
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) };
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(dirPath, (short)010750);
assertAclFeature(dirPath, true);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) };
s = fs.getAclStatus(filePath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(filePath, (short)010640);
assertAclFeature(filePath, true);
}
@Test
public void testDefaultAclNewDirIntermediate() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path dirPath = new Path(path, "dir1");
Path subdirPath = new Path(dirPath, "subdir1");
fs.mkdirs(subdirPath);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) };
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(dirPath, (short)010750);
assertAclFeature(dirPath, true);
s = fs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(subdirPath, (short)010750);
assertAclFeature(subdirPath, true);
}
@Test
public void testDefaultAclNewSymlinkIntermediate() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0750));
Path filePath = new Path(path, "file1");
fs.create(filePath).close();
fs.setPermission(filePath, FsPermission.createImmutable((short)0640));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path dirPath = new Path(path, "dir1");
Path linkPath = new Path(dirPath, "link1");
fs.createSymlink(filePath, linkPath, true);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, NONE) };
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(dirPath, (short)010750);
assertAclFeature(dirPath, true);
expected = new AclEntry[] { };
s = fs.getAclStatus(linkPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(linkPath, (short)0640);
assertAclFeature(linkPath, false);
s = fs.getAclStatus(filePath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(filePath, (short)0640);
assertAclFeature(filePath, false);
}
@Test
public void testDefaultAclNewFileWithMode() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0755));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path filePath = new Path(path, "file1");
int bufferSize = cluster.getConfiguration(0).getInt(
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY,
CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
fs.create(filePath, new FsPermission((short)0740), false, bufferSize,
fs.getDefaultReplication(filePath), fs.getDefaultBlockSize(path), null)
.close();
AclStatus s = fs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE) }, returned);
assertPermission(filePath, (short)010740);
assertAclFeature(filePath, true);
}
@Test
public void testDefaultAclNewDirWithMode() throws Exception {
FileSystem.mkdirs(fs, path, FsPermission.createImmutable((short)0755));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(path, aclSpec);
Path dirPath = new Path(path, "dir1");
fs.mkdirs(dirPath, new FsPermission((short)0740));
AclStatus s = fs.getAclStatus(dirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "foo", ALL),
aclEntry(ACCESS, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "foo", ALL),
aclEntry(DEFAULT, GROUP, READ_EXECUTE),
aclEntry(DEFAULT, MASK, ALL),
aclEntry(DEFAULT, OTHER, READ_EXECUTE) }, returned);
assertPermission(dirPath, (short)010740);
assertAclFeature(dirPath, true);
}
@Test
public void testDefaultAclRenamedFile() throws Exception {
Path dirPath = new Path(path, "dir");
FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(dirPath, aclSpec);
Path filePath = new Path(path, "file1");
fs.create(filePath).close();
fs.setPermission(filePath, FsPermission.createImmutable((short)0640));
Path renamedFilePath = new Path(dirPath, "file1");
fs.rename(filePath, renamedFilePath);
AclEntry[] expected = new AclEntry[] { };
AclStatus s = fs.getAclStatus(renamedFilePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(renamedFilePath, (short)0640);
assertAclFeature(renamedFilePath, false);
}
@Test
public void testDefaultAclRenamedDir() throws Exception {
Path dirPath = new Path(path, "dir");
FileSystem.mkdirs(fs, dirPath, FsPermission.createImmutable((short)0750));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "foo", ALL));
fs.setAcl(dirPath, aclSpec);
Path subdirPath = new Path(path, "subdir");
FileSystem.mkdirs(fs, subdirPath, FsPermission.createImmutable((short)0750));
Path renamedSubdirPath = new Path(dirPath, "subdir");
fs.rename(subdirPath, renamedSubdirPath);
AclEntry[] expected = new AclEntry[] { };
AclStatus s = fs.getAclStatus(renamedSubdirPath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission(renamedSubdirPath, (short)0750);
assertAclFeature(renamedSubdirPath, false);
}
@Test
public void testSkipAclEnforcementPermsDisabled() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", NONE)));
assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
try {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
restartCluster();
assertFilePermissionGranted(fsAsDiana, DIANA, bruceFile);
} finally {
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
restartCluster();
}
}
@Test
public void testSkipAclEnforcementSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
fsAsBruce.modifyAclEntries(bruceFile, Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", NONE)));
assertFilePermissionGranted(fs, DIANA, bruceFile);
assertFilePermissionGranted(fsAsBruce, DIANA, bruceFile);
assertFilePermissionDenied(fsAsDiana, DIANA, bruceFile);
assertFilePermissionGranted(fsAsSupergroupMember, SUPERGROUP_MEMBER,
bruceFile);
}
@Test
public void testModifyAclEntriesMustBeOwnerOrSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", ALL));
fsAsBruce.modifyAclEntries(bruceFile, aclSpec);
fs.modifyAclEntries(bruceFile, aclSpec);
fsAsSupergroupMember.modifyAclEntries(bruceFile, aclSpec);
exception.expect(AccessControlException.class);
fsAsDiana.modifyAclEntries(bruceFile, aclSpec);
}
@Test
public void testRemoveAclEntriesMustBeOwnerOrSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana"));
fsAsBruce.removeAclEntries(bruceFile, aclSpec);
fs.removeAclEntries(bruceFile, aclSpec);
fsAsSupergroupMember.removeAclEntries(bruceFile, aclSpec);
exception.expect(AccessControlException.class);
fsAsDiana.removeAclEntries(bruceFile, aclSpec);
}
@Test
public void testRemoveDefaultAclMustBeOwnerOrSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
fsAsBruce.removeDefaultAcl(bruceFile);
fs.removeDefaultAcl(bruceFile);
fsAsSupergroupMember.removeDefaultAcl(bruceFile);
exception.expect(AccessControlException.class);
fsAsDiana.removeDefaultAcl(bruceFile);
}
@Test
public void testRemoveAclMustBeOwnerOrSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
fsAsBruce.removeAcl(bruceFile);
fs.removeAcl(bruceFile);
fsAsSupergroupMember.removeAcl(bruceFile);
exception.expect(AccessControlException.class);
fsAsDiana.removeAcl(bruceFile);
}
@Test
public void testSetAclMustBeOwnerOrSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_WRITE),
aclEntry(ACCESS, USER, "diana", READ_WRITE),
aclEntry(ACCESS, GROUP, READ),
aclEntry(ACCESS, OTHER, READ));
fsAsBruce.setAcl(bruceFile, aclSpec);
fs.setAcl(bruceFile, aclSpec);
fsAsSupergroupMember.setAcl(bruceFile, aclSpec);
exception.expect(AccessControlException.class);
fsAsDiana.setAcl(bruceFile, aclSpec);
}
@Test
public void testGetAclStatusRequiresTraverseOrSuper() throws Exception {
Path bruceDir = new Path(path, "bruce");
Path bruceFile = new Path(bruceDir, "file");
fs.mkdirs(bruceDir);
fs.setOwner(bruceDir, "bruce", null);
fsAsBruce.create(bruceFile).close();
fsAsBruce.setAcl(bruceDir, Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "diana", READ),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE)));
fsAsBruce.getAclStatus(bruceFile);
fs.getAclStatus(bruceFile);
fsAsSupergroupMember.getAclStatus(bruceFile);
exception.expect(AccessControlException.class);
fsAsDiana.getAclStatus(bruceFile);
}
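  /**
   * Tests {@link FileSystem#access(Path, FsAction)} checks against ACL
   * entries. A minimal sketch of the call pattern (illustrative only):
   * <pre>{@code
   * try {
   *   fs.access(path, FsAction.WRITE); // throws if WRITE is not permitted
   * } catch (AccessControlException e) {
   *   // the caller lacks WRITE permission on path
   * }
   * }</pre>
   */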
@Test
public void testAccess() throws IOException, InterruptedException {
Path p1 = new Path("/p1");
fs.mkdirs(p1);
fs.setOwner(p1, BRUCE.getShortUserName(), "groupX");
fsAsBruce.setAcl(p1, Lists.newArrayList(
aclEntry(ACCESS, USER, READ),
aclEntry(ACCESS, USER, "bruce", READ),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE)));
fsAsBruce.access(p1, FsAction.READ);
try {
fsAsBruce.access(p1, FsAction.WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
// expected
}
Path badPath = new Path("/bad/bad");
try {
fsAsBruce.access(badPath, FsAction.READ);
fail("The access call should have failed");
} catch (FileNotFoundException e) {
// expected
}
// Add a named group entry with only READ access
fsAsBruce.modifyAclEntries(p1, Lists.newArrayList(
aclEntry(ACCESS, GROUP, "groupY", READ)));
// Now bob should have read access, but not write
fsAsBob.access(p1, READ);
try {
fsAsBob.access(p1, WRITE);
fail("The access call should have failed.");
} catch (AccessControlException e) {
      // expected
}
// Add another named group entry with WRITE access
fsAsBruce.modifyAclEntries(p1, Lists.newArrayList(
aclEntry(ACCESS, GROUP, "groupZ", WRITE)));
// Now bob should have write access
fsAsBob.access(p1, WRITE);
// Add a named user entry to deny bob
fsAsBruce.modifyAclEntries(p1,
Lists.newArrayList(aclEntry(ACCESS, USER, "bob", NONE)));
try {
fsAsBob.access(p1, READ);
fail("The access call should have failed.");
} catch (AccessControlException e) {
      // expected
}
}
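  /**
   * Tests that the effective permission of a named-user or group ACL entry
   * is the entry's own permission filtered by the mask, which HDFS stores
   * in the group bits of the file mode. A rough sketch of the computation
   * (an illustration, not the exact implementation):
   * <pre>{@code
   * FsAction entry = FsAction.ALL;        // rwx, granted by the ACL entry
   * FsAction mask  = FsAction.READ;       // r--, from the mode's group bits
   * FsAction effective = entry.and(mask); // r-- is what gets enforced
   * }</pre>
   */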
@Test
public void testEffectiveAccess() throws Exception {
Path p1 = new Path("/testEffectiveAccess");
fs.mkdirs(p1);
// give all access at first
fs.setPermission(p1, FsPermission.valueOf("-rwxrwxrwx"));
AclStatus aclStatus = fs.getAclStatus(p1);
assertEquals("Entries should be empty", 0, aclStatus.getEntries().size());
assertEquals("Permission should be carried by AclStatus",
fs.getFileStatus(p1).getPermission(), aclStatus.getPermission());
    // Add named entries with all access
fs.modifyAclEntries(p1, Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, GROUP, "groupY", ALL)));
aclStatus = fs.getAclStatus(p1);
assertEquals("Entries should contain owner group entry also", 3, aclStatus
.getEntries().size());
// restrict the access
fs.setPermission(p1, FsPermission.valueOf("-rwxr-----"));
    // The latest permission bits should be reflected in the effective
    // permissions.
aclStatus = fs.getAclStatus(p1);
List<AclEntry> entries = aclStatus.getEntries();
for (AclEntry aclEntry : entries) {
if (aclEntry.getName() != null || aclEntry.getType() == GROUP) {
assertEquals(FsAction.ALL, aclEntry.getPermission());
assertEquals(FsAction.READ, aclStatus.getEffectivePermission(aclEntry));
}
}
fsAsBruce.access(p1, READ);
try {
fsAsBruce.access(p1, WRITE);
fail("Access should not be given");
} catch (AccessControlException e) {
// expected
}
fsAsBob.access(p1, READ);
try {
fsAsBob.access(p1, WRITE);
fail("Access should not be given");
} catch (AccessControlException e) {
// expected
}
}
/**
   * Verify the de-duplication of AclFeatures with the same entries.
*
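   * <p>The mechanism under test can be sketched as an interning map with
   * reference counts (an illustration only, not the actual
   * {@code AclStorage} implementation; {@code incrementRefCount} is a
   * hypothetical helper):
   * <pre>{@code
   * Map<AclFeature, AclFeature> unique = new HashMap<>();
   *
   * AclFeature intern(AclFeature candidate) {
   *   AclFeature existing = unique.putIfAbsent(candidate, candidate);
   *   AclFeature result = (existing == null) ? candidate : existing;
   *   result.incrementRefCount(); // shared instance for equal entry lists
   *   return result;
   * }
   * }</pre>
   *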
* @throws Exception
*/
@Test
public void testDeDuplication() throws Exception {
    // This test verifies reference counts held by a static data structure,
    // so the entire cluster is shut down first to start from fresh data.
shutdown();
AclStorage.getUniqueAclFeatures().clear();
startCluster();
setUp();
int currentSize = 0;
Path p1 = new Path("/testDeduplication");
{
// unique default AclEntries for this test
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "testdeduplicateuser", ALL),
aclEntry(DEFAULT, GROUP, "testdeduplicategroup", ALL));
fs.mkdirs(p1);
fs.modifyAclEntries(p1, aclSpec);
assertEquals("One more ACL feature should be unique", currentSize + 1,
AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
currentSize++;
}
Path child1 = new Path(p1, "child1");
AclFeature child1AclFeature;
{
// new child dir should copy entries from its parent.
fs.mkdirs(child1);
assertEquals("One more ACL feature should be unique", currentSize + 1,
AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
child1AclFeature = getAclFeature(child1, cluster);
assertEquals("Reference count should be 1", 1,
child1AclFeature.getRefCount());
currentSize++;
}
Path child2 = new Path(p1, "child2");
{
      // A new child dir copies entries from its parent, but since no
      // further ACL changes were made, they all match its sibling's.
fs.mkdirs(child2);
assertEquals("existing AclFeature should be re-used", currentSize,
AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
AclFeature child2AclFeature = getAclFeature(child1, cluster);
assertSame("Same Aclfeature should be re-used", child1AclFeature,
child2AclFeature);
assertEquals("Reference count should be 2", 2,
child2AclFeature.getRefCount());
}
{
      // Modifying the ACL should decrement the original reference count
      // and increment the new one.
List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER,
"user1", ALL));
fs.modifyAclEntries(child1, aclSpec);
AclFeature modifiedAclFeature = getAclFeature(child1, cluster);
assertEquals("Old Reference count should be 1", 1,
child1AclFeature.getRefCount());
assertEquals("New Reference count should be 1", 1,
modifiedAclFeature.getRefCount());
      // removing the newly added ACL entry should revert to the old AclFeature
AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER)
.setName("user1").build();
fs.removeAclEntries(child1, Lists.newArrayList(aclEntry));
assertEquals("Old Reference count should be 2 again", 2,
child1AclFeature.getRefCount());
assertEquals("New Reference count should be 0", 0,
modifiedAclFeature.getRefCount());
}
{
// verify the reference count on deletion of Acls
fs.removeAcl(child2);
assertEquals("Reference count should be 1", 1,
child1AclFeature.getRefCount());
}
{
// verify the reference count on deletion of dir with ACL
fs.delete(child1, true);
assertEquals("Reference count should be 0", 0,
child1AclFeature.getRefCount());
}
Path file1 = new Path(p1, "file1");
Path file2 = new Path(p1, "file2");
AclFeature fileAclFeature;
{
      // file creation should reuse the same AclFeature reference
fs.create(file1).close();
fileAclFeature = getAclFeature(file1, cluster);
assertEquals("Reference count should be 1", 1,
fileAclFeature.getRefCount());
fs.create(file2).close();
assertEquals("Reference count should be 2", 2,
fileAclFeature.getRefCount());
}
{
      // modifying ACLs on a file should decrease the reference count on the
      // old instance and increase it on the new instance
List<AclEntry> aclSpec = Lists.newArrayList(aclEntry(ACCESS, USER,
"user1", ALL));
// adding new ACL entry
fs.modifyAclEntries(file1, aclSpec);
AclFeature modifiedFileAcl = getAclFeature(file1, cluster);
assertEquals("Old Reference count should be 1", 1,
fileAclFeature.getRefCount());
assertEquals("New Reference count should be 1", 1,
modifiedFileAcl.getRefCount());
      // removing the newly added ACL entry should revert to the old AclFeature
AclEntry aclEntry = new AclEntry.Builder().setScope(ACCESS).setType(USER)
.setName("user1").build();
fs.removeAclEntries(file1, Lists.newArrayList(aclEntry));
assertEquals("Old Reference count should be 2", 2,
fileAclFeature.getRefCount());
assertEquals("New Reference count should be 0", 0,
modifiedFileAcl.getRefCount());
}
{
// reference count should be decreased on deletion of files with ACLs
fs.delete(file2, true);
assertEquals("Reference count should be decreased on delete of the file",
1, fileAclFeature.getRefCount());
fs.delete(file1, true);
assertEquals("Reference count should be decreased on delete of the file",
0, fileAclFeature.getRefCount());
      // once the reference count reaches 0, the instance should be
      // removed from the map
fs.create(file1).close();
AclFeature newFileAclFeature = getAclFeature(file1, cluster);
assertNotSame("Instance should be different on reference count 0",
fileAclFeature, newFileAclFeature);
fileAclFeature = newFileAclFeature;
}
Map<AclFeature, Integer> restartRefCounter = new HashMap<>();
    // Restart the NameNode to check the references.
    // Reference counts will not match after a restart: shutting down the
    // NameNode does not remove AclFeatures from the static map, so the
    // reloaded inodes add their references on top of the stale ones.
    // This only matters in tests; a real cluster restarts in a fresh JVM.
List<AclFeature> entriesBeforeRestart = AclStorage.getUniqueAclFeatures()
.getEntries();
{
//restart by loading edits
for (AclFeature aclFeature : entriesBeforeRestart) {
restartRefCounter.put(aclFeature, aclFeature.getRefCount());
}
cluster.restartNameNode(true);
List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures()
.getEntries();
assertEquals("Entries before and after should be same",
entriesBeforeRestart, entriesAfterRestart);
for (AclFeature aclFeature : entriesAfterRestart) {
int before = restartRefCounter.get(aclFeature);
assertEquals("ReferenceCount After Restart should be doubled",
before * 2, aclFeature.getRefCount());
}
}
{
//restart by loading fsimage
cluster.getNameNodeRpc()
.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc()
.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
cluster.restartNameNode(true);
List<AclFeature> entriesAfterRestart = AclStorage.getUniqueAclFeatures()
.getEntries();
assertEquals("Entries before and after should be same",
entriesBeforeRestart, entriesAfterRestart);
for (AclFeature aclFeature : entriesAfterRestart) {
int before = restartRefCounter.get(aclFeature);
assertEquals("ReferenceCount After 2 Restarts should be tripled",
before * 3, aclFeature.getRefCount());
}
}
}
/**
* Creates a FileSystem for the super-user.
*
* @return FileSystem for super-user
* @throws Exception if creation fails
*/
protected FileSystem createFileSystem() throws Exception {
return cluster.getFileSystem();
}
/**
* Creates a FileSystem for a specific user.
*
* @param user UserGroupInformation specific user
* @return FileSystem for specific user
* @throws Exception if creation fails
*/
protected FileSystem createFileSystem(UserGroupInformation user)
throws Exception {
return DFSTestUtil.getFileSystemAs(user, cluster.getConfiguration(0));
}
/**
* Initializes all FileSystem instances used in the tests.
*
* @throws Exception if initialization fails
*/
private void initFileSystems() throws Exception {
fs = createFileSystem();
fsAsBruce = createFileSystem(BRUCE);
fsAsDiana = createFileSystem(DIANA);
fsAsBob = createFileSystem(BOB);
fsAsSupergroupMember = createFileSystem(SUPERGROUP_MEMBER);
}
/**
* Restarts the cluster without formatting, so all data is preserved.
*
* @throws Exception if restart fails
*/
private void restartCluster() throws Exception {
destroyFileSystems();
shutdown();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(false)
.build();
cluster.waitActive();
initFileSystems();
}
/**
* Asserts whether or not the inode for the test path has an AclFeature.
*
* @param expectAclFeature boolean true if an AclFeature must be present,
* false if an AclFeature must not be present
* @throws IOException thrown if there is an I/O error
*/
private static void assertAclFeature(boolean expectAclFeature)
throws IOException {
assertAclFeature(path, expectAclFeature);
}
/**
* Asserts whether or not the inode for a specific path has an AclFeature.
*
* @param pathToCheck Path inode to check
* @param expectAclFeature boolean true if an AclFeature must be present,
* false if an AclFeature must not be present
* @throws IOException thrown if there is an I/O error
*/
private static void assertAclFeature(Path pathToCheck,
boolean expectAclFeature) throws IOException {
AclFeature aclFeature = getAclFeature(pathToCheck, cluster);
if (expectAclFeature) {
assertNotNull(aclFeature);
// Intentionally capturing a reference to the entries, not using nested
// calls. This way, we get compile-time enforcement that the entries are
// stored in an ImmutableList.
ImmutableList<AclEntry> entries = AclStorage
.getEntriesFromAclFeature(aclFeature);
assertFalse(entries.isEmpty());
} else {
assertNull(aclFeature);
}
}
  /**
   * Gets the AclFeature attached to the inode of a specific path.
   *
   * @param pathToCheck Path inode to check
   * @param cluster MiniDFSCluster to query
   * @return AclFeature of the inode, or null if none is present
   * @throws IOException thrown if there is an I/O error
   */
public static AclFeature getAclFeature(Path pathToCheck,
MiniDFSCluster cluster) throws IOException {
INode inode = cluster.getNamesystem().getFSDirectory()
.getINode(pathToCheck.toUri().getPath(), false);
assertNotNull(inode);
    return inode.getAclFeature();
}
/**
* Asserts the value of the FsPermission bits on the inode of the test path.
*
* @param perm short expected permission bits
* @throws IOException thrown if there is an I/O error
*/
private void assertPermission(short perm) throws IOException {
assertPermission(path, perm);
}
/**
* Asserts the value of the FsPermission bits on the inode of a specific path.
*
* @param pathToCheck Path inode to check
* @param perm short expected permission bits
* @throws IOException thrown if there is an I/O error
*/
private void assertPermission(Path pathToCheck, short perm)
throws IOException {
AclTestHelpers.assertPermission(fs, pathToCheck, perm);
}
}
| 65,709
| 37.698469
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestMetadataVersionOutput.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertTrue;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.junit.After;
import org.junit.Test;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMESERVICE_ID;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX;
public class TestMetadataVersionOutput {
private MiniDFSCluster dfsCluster = null;
private final Configuration conf = new Configuration();
@After
public void tearDown() throws Exception {
if (dfsCluster != null) {
dfsCluster.shutdown();
}
Thread.sleep(2000);
}
private void initConfig() {
conf.set(DFS_NAMESERVICE_ID, "ns1");
conf.set(DFS_HA_NAMENODES_KEY_PREFIX + ".ns1", "nn1");
conf.set(DFS_HA_NAMENODE_ID_KEY, "nn1");
conf.set(DFS_NAMENODE_NAME_DIR_KEY + ".ns1.nn1", MiniDFSCluster.getBaseDirectory() + "1");
conf.unset(DFS_NAMENODE_NAME_DIR_KEY);
}
@Test(timeout = 30000)
public void testMetadataVersionOutput() throws IOException {
initConfig();
dfsCluster = new MiniDFSCluster.Builder(conf).
manageNameDfsDirs(false).
numDataNodes(1).
checkExitOnShutdown(false).
build();
dfsCluster.waitClusterUp();
dfsCluster.shutdown(false);
initConfig();
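    // Capture System.out so the -metadataVersion report can be asserted on.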
final PrintStream origOut = System.out;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
final PrintStream stdOut = new PrintStream(baos);
System.setOut(stdOut);
try {
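      // -metadataVersion prints the versions and then terminates the
      // NameNode; with System.exit disabled under test, this surfaces as an
      // ExitException, which is asserted below.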
NameNode.createNameNode(new String[] { "-metadataVersion" }, conf);
} catch (Exception e) {
assertExceptionContains("ExitException", e);
}
    /* Check that the metadata version is printed correctly. */
    final String verNumStr = String.valueOf(
        HdfsServerConstants.NAMENODE_LAYOUT_VERSION);
    final String output = baos.toString("UTF-8");
    assertTrue(output.contains("HDFS Image Version: " + verNumStr));
    assertTrue(output.contains("Software format version: " + verNumStr));
System.setOut(origOut);
}
}
| 3,269
| 35.333333
| 94
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.test.PathUtils;
import org.junit.Test;
public class TestFSImage {
private static final String HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ =
"image-with-zero-block-size.tar.gz";
@Test
public void testPersist() throws IOException {
Configuration conf = new Configuration();
testPersistHelper(conf);
}
@Test
public void testCompression() throws IOException {
Configuration conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, true);
conf.set(DFSConfigKeys.DFS_IMAGE_COMPRESSION_CODEC_KEY,
"org.apache.hadoop.io.compress.GzipCodec");
testPersistHelper(conf);
}
private void testPersistHelper(Configuration conf) throws IOException {
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
FSNamesystem fsn = cluster.getNamesystem();
DistributedFileSystem fs = cluster.getFileSystem();
final Path dir = new Path("/abc/def");
final Path file1 = new Path(dir, "f1");
final Path file2 = new Path(dir, "f2");
// create an empty file f1
fs.create(file1).close();
// create an under-construction file f2
FSDataOutputStream out = fs.create(file2);
out.writeBytes("hello");
((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.UPDATE_LENGTH));
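      // hsync with UPDATE_LENGTH also persists the flushed length on the
      // NameNode, so the under-construction file's size survives the
      // checkpoint below.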
// checkpoint
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode();
cluster.waitActive();
fs = cluster.getFileSystem();
assertTrue(fs.isDirectory(dir));
assertTrue(fs.exists(file1));
assertTrue(fs.exists(file2));
// check internals of file2
INodeFile file2Node = fsn.dir.getINode4Write(file2.toString()).asFile();
assertEquals("hello".length(), file2Node.computeFileSize());
assertTrue(file2Node.isUnderConstruction());
BlockInfo[] blks = file2Node.getBlocks();
assertEquals(1, blks.length);
assertEquals(BlockUCState.UNDER_CONSTRUCTION, blks[0].getBlockUCState());
// check lease manager
Lease lease = fsn.leaseManager.getLease(file2Node);
Assert.assertNotNull(lease);
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
 * On checkpointing, any stale fsimage checkpoint file should be deleted.
*/
@Test
public void testRemovalStaleFsimageCkpt() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
Configuration conf = new HdfsConfiguration();
try {
cluster = new MiniDFSCluster.Builder(conf).
numDataNodes(1).format(true).build();
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
"0.0.0.0:0");
secondary = new SecondaryNameNode(conf);
// Do checkpointing
secondary.doCheckpoint();
NNStorage storage = secondary.getFSImage().storage;
File currentDir = FSImageTestUtil.
getCurrentDirs(storage, NameNodeDirType.IMAGE).get(0);
// Create a stale fsimage.ckpt file
File staleCkptFile = new File(currentDir.getPath() +
"/fsimage.ckpt_0000000000000000002");
staleCkptFile.createNewFile();
assertTrue(staleCkptFile.exists());
// After checkpoint stale fsimage.ckpt file should be deleted
secondary.doCheckpoint();
assertFalse(staleCkptFile.exists());
} finally {
if (secondary != null) {
secondary.shutdown();
secondary = null;
}
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
}
/**
 * Ensure that the digest written by the saver equals the digest of the
* file.
*/
@Test
public void testDigest() throws IOException {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
DistributedFileSystem fs = cluster.getFileSystem();
fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
fs.saveNamespace();
fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
File currentDir = FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0).get(
0);
File fsimage = FSImageTestUtil.findNewestImageFile(currentDir
.getAbsolutePath());
assertEquals(MD5FileUtils.readStoredMd5ForFile(fsimage),
MD5FileUtils.computeMd5ForFile(fsimage));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Ensure mtime and atime can be loaded from fsimage.
*/
@Test(timeout=60000)
public void testLoadMtimeAtime() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
DistributedFileSystem hdfs = cluster.getFileSystem();
      String userDir = hdfs.getHomeDirectory().toUri().getPath();
Path file = new Path(userDir, "file");
Path dir = new Path(userDir, "/dir");
Path link = new Path(userDir, "/link");
hdfs.createNewFile(file);
hdfs.mkdirs(dir);
hdfs.createSymlink(file, link, false);
long mtimeFile = hdfs.getFileStatus(file).getModificationTime();
long atimeFile = hdfs.getFileStatus(file).getAccessTime();
long mtimeDir = hdfs.getFileStatus(dir).getModificationTime();
long mtimeLink = hdfs.getFileLinkStatus(link).getModificationTime();
long atimeLink = hdfs.getFileLinkStatus(link).getAccessTime();
// save namespace and restart cluster
hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(1).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
assertEquals(mtimeFile, hdfs.getFileStatus(file).getModificationTime());
assertEquals(atimeFile, hdfs.getFileStatus(file).getAccessTime());
assertEquals(mtimeDir, hdfs.getFileStatus(dir).getModificationTime());
assertEquals(mtimeLink, hdfs.getFileLinkStatus(link).getModificationTime());
assertEquals(atimeLink, hdfs.getFileLinkStatus(link).getAccessTime());
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
 * In this test case, an image was created containing a file with
 * preferredBlockSize = 0 (such files were allowed before the 2.1.0-beta
 * release). A NameNode at version 2.6 or later will not be able to read
 * this particular file without the fix.
* See HDFS-7788 for more information.
* @throws Exception
*/
@Test
public void testZeroBlockSize() throws Exception {
final Configuration conf = new HdfsConfiguration();
String tarFile = System.getProperty("test.cache.data", "build/test/cache")
+ "/" + HADOOP_2_7_ZER0_BLOCK_SIZE_TGZ;
String testDir = PathUtils.getTestDirName(getClass());
File dfsDir = new File(testDir, "image-with-zero-block-size");
if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) {
throw new IOException("Could not delete dfs directory '" + dfsDir + "'");
}
FileUtil.unTar(new File(tarFile), new File(testDir));
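    // Judging by the constant name, the tarball was packaged with a Hadoop
    // 2.7-era NameNode and expands to a name directory containing the file
    // with preferredBlockSize = 0.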
File nameDir = new File(dfsDir, "name");
GenericTestUtils.assertExists(nameDir);
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
nameDir.getAbsolutePath());
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.waitSafeMode(false)
.startupOption(StartupOption.UPGRADE)
.build();
try {
FileSystem fs = cluster.getFileSystem();
Path testPath = new Path("/tmp/zeroBlockFile");
assertTrue("File /tmp/zeroBlockFile doesn't exist ", fs.exists(testPath));
assertTrue("Name node didn't come up", cluster.isNameNodeUp(0));
} finally {
cluster.shutdown();
//Clean up
FileUtil.fullyDelete(dfsDir);
}
}
}
| 10,513
| 36.820144
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestParallelImageWrite.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.util.Collections;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.junit.Test;
/**
* A JUnit test for checking if restarting DFS preserves integrity.
 * Specifically, the FSImage is written to multiple directories in parallel.
*/
public class TestParallelImageWrite {
private static final int NUM_DATANODES = 4;
/** check if DFS remains in proper condition after a restart */
@Test
public void testRestartDFS() throws Exception {
final Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
FSNamesystem fsn = null;
int numNamenodeDirs;
DFSTestUtil files = new DFSTestUtil.Builder().setName("TestRestartDFS").
setNumFiles(200).build();
final String dir = "/srcdat";
final Path rootpath = new Path("/");
final Path dirpath = new Path(dir);
long rootmtime;
FileStatus rootstatus;
FileStatus dirstatus;
try {
cluster = new MiniDFSCluster.Builder(conf).format(true)
.numDataNodes(NUM_DATANODES).build();
String[] nameNodeDirs = conf.getStrings(
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new String[] {});
numNamenodeDirs = nameNodeDirs.length;
assertTrue("failed to get number of Namenode StorageDirs",
numNamenodeDirs != 0);
FileSystem fs = cluster.getFileSystem();
files.createFiles(fs, dir);
rootmtime = fs.getFileStatus(rootpath).getModificationTime();
      rootstatus = fs.getFileStatus(rootpath);
dirstatus = fs.getFileStatus(dirpath);
fs.setOwner(rootpath, rootstatus.getOwner() + "_XXX", null);
fs.setOwner(dirpath, null, dirstatus.getGroup() + "_XXX");
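      // Change the owner of the root and the group of the directory before
      // shutdown, so the restart below has metadata changes that must be
      // recovered from the edit log and then saved into a new image.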
} finally {
if (cluster != null) { cluster.shutdown(); }
}
try {
// Force the NN to save its images on startup so long as
// there are any uncheckpointed txns
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
// Here we restart the MiniDFScluster without formatting namenode
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(NUM_DATANODES).build();
fsn = cluster.getNamesystem();
FileSystem fs = cluster.getFileSystem();
assertTrue("Filesystem corrupted after restart.",
files.checkFiles(fs, dir));
final FileStatus newrootstatus = fs.getFileStatus(rootpath);
assertEquals(rootmtime, newrootstatus.getModificationTime());
assertEquals(rootstatus.getOwner() + "_XXX", newrootstatus.getOwner());
assertEquals(rootstatus.getGroup(), newrootstatus.getGroup());
final FileStatus newdirstatus = fs.getFileStatus(dirpath);
assertEquals(dirstatus.getOwner(), newdirstatus.getOwner());
assertEquals(dirstatus.getGroup() + "_XXX", newdirstatus.getGroup());
rootmtime = fs.getFileStatus(rootpath).getModificationTime();
final String checkAfterRestart = checkImages(fsn, numNamenodeDirs);
// Modify the system and then perform saveNamespace
files.cleanup(fs, dir);
files.createFiles(fs, dir);
fsn.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
cluster.getNameNodeRpc().saveNamespace();
final String checkAfterModify = checkImages(fsn, numNamenodeDirs);
assertFalse("Modified namespace should change fsimage contents. " +
"was: " + checkAfterRestart + " now: " + checkAfterModify,
checkAfterRestart.equals(checkAfterModify));
fsn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
files.cleanup(fs, dir);
} finally {
if (cluster != null) { cluster.shutdown(); }
}
}
/**
* Confirm that FSImage files in all StorageDirectory are the same,
* and non-empty, and there are the expected number of them.
* @param fsn - the FSNamesystem being checked.
* @param numImageDirs - the configured number of StorageDirectory of type IMAGE.
* @return - the md5 hash of the most recent FSImage files, which must all be the same.
* @throws AssertionError if image files are empty or different,
 * if fewer than two StorageDirectories are provided, or if the
* actual number of StorageDirectory is less than configured.
*/
public static String checkImages(
FSNamesystem fsn, int numImageDirs)
throws Exception {
NNStorage stg = fsn.getFSImage().getStorage();
//any failed StorageDirectory is removed from the storageDirs list
assertEquals("Some StorageDirectories failed Upgrade",
numImageDirs, stg.getNumStorageDirs(NameNodeDirType.IMAGE));
assertTrue("Not enough fsimage copies in MiniDFSCluster " +
"to test parallel write", numImageDirs > 1);
// List of "current/" directory from each SD
List<File> dirs = FSImageTestUtil.getCurrentDirs(stg, NameNodeDirType.IMAGE);
// across directories, all files with same names should be identical hashes
FSImageTestUtil.assertParallelFilesAreIdentical(
dirs, Collections.<String>emptySet());
FSImageTestUtil.assertSameNewestImage(dirs);
// Return the hash of the newest image file
StorageDirectory firstSd = stg.dirIterator(NameNodeDirType.IMAGE).next();
File latestImage = FSImageTestUtil.findLatestImageFile(firstSd);
String md5 = FSImageTestUtil.getImageFileMD5IgnoringTxId(latestImage);
System.err.println("md5 of " + latestImage + ": " + md5);
return md5;
}
}
| 6,880
| 40.957317
| 89
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindow.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top.window;
import org.junit.Assert;
import org.junit.Test;
public class TestRollingWindow {
final int WINDOW_LEN = 60000;
final int BUCKET_CNT = 10;
final int BUCKET_LEN = WINDOW_LEN / BUCKET_CNT;
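  // The window spans WINDOW_LEN ms split into BUCKET_CNT buckets of
  // BUCKET_LEN ms each; getSum(t) aggregates the increments recorded
  // within the window ending at time t.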
@Test
public void testBasics() {
RollingWindow window = new RollingWindow(WINDOW_LEN, BUCKET_CNT);
long time = 1;
Assert.assertEquals("The initial sum of rolling window must be 0", 0,
window.getSum(time));
time = WINDOW_LEN + BUCKET_LEN * 3 / 2;
Assert.assertEquals("The initial sum of rolling window must be 0", 0,
window.getSum(time));
window.incAt(time, 5);
Assert.assertEquals(
"The sum of rolling window does not reflect the recent update", 5,
window.getSum(time));
time += BUCKET_LEN;
window.incAt(time, 6);
Assert.assertEquals(
"The sum of rolling window does not reflect the recent update", 11,
window.getSum(time));
time += WINDOW_LEN - BUCKET_LEN;
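    // The query time is now a full window past the first increment, so that
    // increment has aged out and only the second one (6) remains.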
Assert.assertEquals(
"The sum of rolling window does not reflect rolling effect", 6,
window.getSum(time));
time += BUCKET_LEN;
Assert.assertEquals(
"The sum of rolling window does not reflect rolling effect", 0,
window.getSum(time));
}
@Test
public void testReorderedAccess() {
RollingWindow window = new RollingWindow(WINDOW_LEN, BUCKET_CNT);
long time = 2 * WINDOW_LEN + BUCKET_LEN * 3 / 2;
window.incAt(time, 5);
time++;
Assert.assertEquals(
"The sum of rolling window does not reflect the recent update", 5,
window.getSum(time));
long reorderedTime = time - 2 * BUCKET_LEN;
window.incAt(reorderedTime, 6);
Assert.assertEquals(
"The sum of rolling window does not reflect the reordered update", 11,
window.getSum(time));
time = reorderedTime + WINDOW_LEN;
Assert.assertEquals(
"The sum of rolling window does not reflect rolling effect", 5,
window.getSum(time));
}
}
| 2,835
| 32.364706
| 78
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/top/window/TestRollingWindowManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.top.window;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
import org.junit.Before;
import org.junit.Test;
import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.Op;
import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.TopWindow;
import static org.apache.hadoop.hdfs.server.namenode.top.window.RollingWindowManager.User;
import static org.junit.Assert.assertEquals;
public class TestRollingWindowManager {
Configuration conf;
RollingWindowManager manager;
String[] users;
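  // Reads as "minute to milliseconds": one minute expressed in ms.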
final static int MIN_2_MS = 60000;
final int WINDOW_LEN_MS = 1 * MIN_2_MS;
final int BUCKET_CNT = 10;
final int N_TOP_USERS = 10;
final int BUCKET_LEN = WINDOW_LEN_MS / BUCKET_CNT;
@Before
public void init() {
conf = new Configuration();
conf.setInt(DFSConfigKeys.NNTOP_BUCKETS_PER_WINDOW_KEY, BUCKET_CNT);
conf.setInt(DFSConfigKeys.NNTOP_NUM_USERS_KEY, N_TOP_USERS);
manager = new RollingWindowManager(conf, WINDOW_LEN_MS);
users = new String[2 * N_TOP_USERS];
for (int i = 0; i < users.length; i++) {
users[i] = "user" + i;
}
}
@Test
public void testTops() {
long time = WINDOW_LEN_MS + BUCKET_LEN * 3 / 2;
for (int i = 0; i < users.length; i++)
manager.recordMetric(time, "open", users[i], (i + 1) * 2);
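    // User i records (i + 1) * 2 "open" ops (counts 2, 4, ..., 40), so the
    // top-N list should be dominated by the highest-indexed users.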
time++;
for (int i = 0; i < users.length; i++)
manager.recordMetric(time, "close", users[i], i + 1);
time++;
TopWindow tops = manager.snapshot(time);
assertEquals("Unexpected number of ops", 2, tops.getOps().size());
for (Op op : tops.getOps()) {
final List<User> topUsers = op.getTopUsers();
assertEquals("Unexpected number of users", N_TOP_USERS, topUsers.size());
      if ("open".equals(op.getOpType())) {
for (int i = 0; i < topUsers.size(); i++) {
User user = topUsers.get(i);
assertEquals("Unexpected count for user " + user.getUser(),
(users.length-i)*2, user.getCount());
}
// Closed form of sum(range(2,42,2))
assertEquals("Unexpected total count for op",
(2+(users.length*2))*(users.length/2),
op.getTotalCount());
}
}
    // move the window forward so the "open" results fall out of it
time += WINDOW_LEN_MS - 2;
tops = manager.snapshot(time);
assertEquals("Unexpected number of ops", 1, tops.getOps().size());
final Op op = tops.getOps().get(0);
assertEquals("Should only see close ops", "close", op.getOpType());
final List<User> topUsers = op.getTopUsers();
for (int i = 0; i < topUsers.size(); i++) {
User user = topUsers.get(i);
assertEquals("Unexpected count for user " + user.getUser(),
(users.length-i), user.getCount());
}
// Closed form of sum(range(1,21))
assertEquals("Unexpected total count for op",
(1 + users.length) * (users.length / 2), op.getTotalCount());
}
}
| 3,902
| 36.893204
| 95
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.web.resources;
import static org.mockito.Mockito.*;
import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.web.WebHdfsTestUtil;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.PostOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.log4j.Level;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Test WebHDFS which provides data locality using HTTP redirection.
*/
public class TestWebHdfsDataLocality {
static final Log LOG = LogFactory.getLog(TestWebHdfsDataLocality.class);
{
DFSTestUtil.setNameNodeLogLevel(Level.ALL);
}
private static final String RACK0 = "/rack0";
private static final String RACK1 = "/rack1";
private static final String RACK2 = "/rack2";
@Rule
public final ExpectedException exception = ExpectedException.none();
@Test
public void testDataLocality() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
final int nDataNodes = racks.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks));
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(nDataNodes)
.racks(racks)
.build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final NameNode namenode = cluster.getNameNode();
final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
).getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f = "/foo";
{ //test CREATE
for(int i = 0; i < nDataNodes; i++) {
//set client address to a particular datanode
final DataNode dn = cluster.getDataNodes().get(i);
final String ipAddr = dm.getDatanode(dn.getDatanodeId()).getIpAddr();
//The chosen datanode must be the same as the client address
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
namenode, f, PutOpParam.Op.CREATE, -1L, blocksize, null);
Assert.assertEquals(ipAddr, chosen.getIpAddr());
}
}
//create a file with one replica.
final Path p = new Path(f);
final FSDataOutputStream out = dfs.create(p, (short)1);
out.write(1);
out.close();
//get replica location.
final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
namenode, f, 0, 1);
final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
Assert.assertEquals(1, lb.size());
final DatanodeInfo[] locations = lb.get(0).getLocations();
Assert.assertEquals(1, locations.length);
final DatanodeInfo expected = locations[0];
//For GETFILECHECKSUM, OPEN and APPEND,
//the chosen datanode must be the same as the replica location.
{ //test GETFILECHECKSUM
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test OPEN
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
namenode, f, GetOpParam.Op.OPEN, 0, blocksize, null);
Assert.assertEquals(expected, chosen);
}
{ //test APPEND
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
namenode, f, PostOpParam.Op.APPEND, -1L, blocksize, null);
Assert.assertEquals(expected, chosen);
}
} finally {
cluster.shutdown();
}
}
@Test
public void testExcludeDataNodes() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();
final String[] racks = {RACK0, RACK0, RACK1, RACK1, RACK2, RACK2};
final String[] hosts = {"DataNode1", "DataNode2", "DataNode3","DataNode4","DataNode5","DataNode6"};
final int nDataNodes = hosts.length;
LOG.info("nDataNodes=" + nDataNodes + ", racks=" + Arrays.asList(racks)
+ ", hosts=" + Arrays.asList(hosts));
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.hosts(hosts).numDataNodes(nDataNodes).racks(racks).build();
try {
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final NameNode namenode = cluster.getNameNode();
final DatanodeManager dm = namenode.getNamesystem().getBlockManager(
).getDatanodeManager();
LOG.info("dm=" + dm);
final long blocksize = DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
final String f = "/foo";
//create a file with three replica.
final Path p = new Path(f);
final FSDataOutputStream out = dfs.create(p, (short)3);
out.write(1);
out.close();
//get replica location.
final LocatedBlocks locatedblocks = NameNodeAdapter.getBlockLocations(
namenode, f, 0, 1);
final List<LocatedBlock> lb = locatedblocks.getLocatedBlocks();
Assert.assertEquals(1, lb.size());
final DatanodeInfo[] locations = lb.get(0).getLocations();
Assert.assertEquals(3, locations.length);
//For GETFILECHECKSUM, OPEN and APPEND,
      //the chosen datanode must be different from the excluded nodes.
StringBuffer sb = new StringBuffer();
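      // sb accumulates a comma-separated exclude list of transfer
      // addresses; each loop iteration excludes one more of the replicas.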
for (int i = 0; i < 2; i++) {
sb.append(locations[i].getXferAddr());
{ // test GETFILECHECKSUM
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
namenode, f, GetOpParam.Op.GETFILECHECKSUM, -1L, blocksize,
sb.toString());
for (int j = 0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),
chosen.getHostName());
}
}
{ // test OPEN
final DatanodeInfo chosen = NamenodeWebHdfsMethods.chooseDatanode(
namenode, f, GetOpParam.Op.OPEN, 0, blocksize, sb.toString());
for (int j = 0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),
chosen.getHostName());
}
}
{ // test APPEND
final DatanodeInfo chosen = NamenodeWebHdfsMethods
.chooseDatanode(namenode, f, PostOpParam.Op.APPEND, -1L,
blocksize, sb.toString());
for (int j = 0; j <= i; j++) {
Assert.assertNotEquals(locations[j].getHostName(),
chosen.getHostName());
}
}
sb.append(",");
}
} finally {
cluster.shutdown();
}
}
@Test
public void testChooseDatanodeBeforeNamesystemInit() throws Exception {
NameNode nn = mock(NameNode.class);
when(nn.getNamesystem()).thenReturn(null);
exception.expect(IOException.class);
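    // The misspelling "intialized" below matches the message thrown by
    // NamenodeWebHdfsMethods, so it must not be corrected here.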
exception.expectMessage("Namesystem has not been intialized yet.");
NamenodeWebHdfsMethods.chooseDatanode(nn, "/path", PutOpParam.Op.CREATE, 0,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT, null);
}
}
| 8,916
| 36.944681
| 103
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.assertQuantileGauges;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.DataInputStream;
import java.io.IOException;
import java.util.Random;
import com.google.common.collect.ImmutableList;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test for metrics published by the Namenode
*/
public class TestNameNodeMetrics {
private static final Configuration CONF = new HdfsConfiguration();
private static final int DFS_REPLICATION_INTERVAL = 1;
private static final Path TEST_ROOT_DIR_PATH =
new Path("/testNameNodeMetrics");
private static final String NN_METRICS = "NameNodeActivity";
private static final String NS_METRICS = "FSNamesystem";
public static final Log LOG = LogFactory.getLog(TestNameNodeMetrics.class);
// Number of datanodes in the cluster
private static final int DATANODE_COUNT = 3;
private static final int WAIT_GAUGE_VALUE_RETRIES = 20;
// Rollover interval of percentile metrics (in seconds)
private static final int PERCENTILES_INTERVAL = 1;
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
DFS_REPLICATION_INTERVAL);
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY,
DFS_REPLICATION_INTERVAL);
CONF.set(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
"" + PERCENTILES_INTERVAL);
// Enable stale DataNodes checking
CONF.setBoolean(DFSConfigKeys.DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY, true);
((Log4JLogger)LogFactory.getLog(MetricsAsserts.class))
.getLogger().setLevel(Level.DEBUG);
}
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private final Random rand = new Random();
private FSNamesystem namesystem;
private BlockManager bm;
private static Path getTestPath(String fileName) {
return new Path(TEST_ROOT_DIR_PATH, fileName);
}
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(DATANODE_COUNT).build();
cluster.waitActive();
namesystem = cluster.getNamesystem();
bm = namesystem.getBlockManager();
fs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
MetricsSource source = DefaultMetricsSystem.instance().getSource("UgiMetrics");
if (source != null) {
      // Run only once, since the UGI metrics source is cleaned up during teardown
MetricsRecordBuilder rb = getMetrics(source);
assertQuantileGauges("GetGroups1s", rb);
}
cluster.shutdown();
}
/** create a file with a length of <code>fileLen</code> */
private void createFile(Path file, long fileLen, short replicas) throws IOException {
DFSTestUtil.createFile(fs, file, fileLen, replicas, rand.nextLong());
}
private void updateMetrics() throws Exception {
// Wait for metrics update (corresponds to dfs.namenode.replication.interval
// for some block related metrics to get updated)
Thread.sleep(1000);
}
private void readFile(FileSystem fileSys,Path name) throws IOException {
    //Just read the file so that the GetBlockLocations count is incremented
DataInputStream stm = fileSys.open(name);
byte [] buffer = new byte[4];
stm.read(buffer,0,4);
stm.close();
}
/**
* Test that capacity metrics are exported and pass
* basic sanity tests.
*/
@Test (timeout = 1800)
public void testCapacityMetrics() throws Exception {
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
long capacityTotal = MetricsAsserts.getLongGauge("CapacityTotal", rb);
    assertTrue(capacityTotal != 0);
long capacityUsed = MetricsAsserts.getLongGauge("CapacityUsed", rb);
long capacityRemaining =
MetricsAsserts.getLongGauge("CapacityRemaining", rb);
long capacityUsedNonDFS =
MetricsAsserts.getLongGauge("CapacityUsedNonDFS", rb);
    assertTrue(capacityUsed + capacityRemaining + capacityUsedNonDFS ==
        capacityTotal);
}
/** Test metrics indicating the number of stale DataNodes */
@Test
public void testStaleNodes() throws Exception {
// Set two datanodes as stale
for (int i = 0; i < 2; i++) {
DataNode dn = cluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
long staleInterval = CONF.getLong(
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, -(staleInterval + 1));
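      // Push the node's last-contact timestamp back past the stale
      // interval so the next heartbeat check classifies it as stale.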
}
    // Let the HeartbeatManager check heartbeats
BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
.getBlockManager());
assertGauge("StaleDataNodes", 2, getMetrics(NS_METRICS));
// Reset stale datanodes
for (int i = 0; i < 2; i++) {
DataNode dn = cluster.getDataNodes().get(i);
DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
DatanodeDescriptor dnDes = cluster.getNameNode().getNamesystem()
.getBlockManager().getDatanodeManager()
.getDatanode(dn.getDatanodeId());
DFSTestUtil.resetLastUpdatesWithOffset(dnDes, 0);
}
    // Let the HeartbeatManager refresh the stale-node count
BlockManagerTestUtil.checkHeartbeat(cluster.getNameNode().getNamesystem()
.getBlockManager());
assertGauge("StaleDataNodes", 0, getMetrics(NS_METRICS));
}
/** Test metrics associated with addition of a file */
@Test
public void testFileAdd() throws Exception {
    // Add a file of 3200 bytes, i.e. 32 blocks at the 100-byte block size
final Path file = getTestPath("testFileAdd");
createFile(file, 3200, (short)3);
final long blockCount = 32;
int blockCapacity = namesystem.getBlockCapacity();
updateMetrics();
assertGauge("BlockCapacity", blockCapacity, getMetrics(NS_METRICS));
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
// File create operations is 1
// Number of files created is depth of <code>file</code> path
assertCounter("CreateFileOps", 1L, rb);
assertCounter("FilesCreated", (long)file.depth(), rb);
updateMetrics();
long filesTotal = file.depth() + 1; // Add 1 for root
rb = getMetrics(NS_METRICS);
assertGauge("FilesTotal", filesTotal, rb);
assertGauge("BlocksTotal", blockCount, rb);
fs.delete(file, true);
filesTotal--; // reduce the filecount for deleted file
rb = waitForDnMetricValue(NS_METRICS, "FilesTotal", filesTotal);
assertGauge("BlocksTotal", 0L, rb);
assertGauge("PendingDeletionBlocks", 0L, rb);
rb = getMetrics(NN_METRICS);
// Delete file operations and number of files deleted must be 1
assertCounter("DeleteFileOps", 1L, rb);
assertCounter("FilesDeleted", 1L, rb);
}
/** Corrupt a block and ensure metrics reflects it */
@Test
public void testCorruptBlock() throws Exception {
    // Create a file with a single block that has two replicas
final Path file = getTestPath("testCorruptBlock");
createFile(file, 100, (short)2);
// Corrupt first replica of the block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), file.toString(), 0, 1).get(0);
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"STORAGE_ID", "TEST");
} finally {
cluster.getNamesystem().writeUnlock();
}
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("CorruptBlocks", 1L, rb);
assertGauge("PendingReplicationBlocks", 1L, rb);
assertGauge("ScheduledReplicationBlocks", 1L, rb);
fs.delete(file, true);
rb = waitForDnMetricValue(NS_METRICS, "CorruptBlocks", 0L);
assertGauge("PendingReplicationBlocks", 0L, rb);
assertGauge("ScheduledReplicationBlocks", 0L, rb);
}
  /** Create excess blocks by reducing the replication factor of
   * a file and ensure metrics reflect it
*/
@Test
public void testExcessBlocks() throws Exception {
Path file = getTestPath("testExcessBlocks");
createFile(file, 100, (short)2);
NameNodeAdapter.setReplication(namesystem, file.toString(), (short)1);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", 1L, rb);
// verify ExcessBlocks metric is decremented and
// excessReplicateMap is cleared after deleting a file
fs.delete(file, true);
rb = getMetrics(NS_METRICS);
assertGauge("ExcessBlocks", 0L, rb);
assertTrue(bm.excessReplicateMap.isEmpty());
}
/** Test to ensure metrics reflects missing blocks */
@Test
public void testMissingBlock() throws Exception {
    // Create a file with a single block that has only one replica
Path file = getTestPath("testMissingBlocks");
createFile(file, 100, (short)1);
// Corrupt the only replica of the block to result in a missing block
LocatedBlock block = NameNodeAdapter.getBlockLocations(
cluster.getNameNode(), file.toString(), 0, 1).get(0);
cluster.getNamesystem().writeLock();
try {
bm.findAndMarkBlockAsCorrupt(block.getBlock(), block.getLocations()[0],
"STORAGE_ID", "TEST");
} finally {
cluster.getNamesystem().writeUnlock();
}
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NS_METRICS);
assertGauge("UnderReplicatedBlocks", 1L, rb);
assertGauge("MissingBlocks", 1L, rb);
assertGauge("MissingReplOneBlocks", 1L, rb);
fs.delete(file, true);
waitForDnMetricValue(NS_METRICS, "UnderReplicatedBlocks", 0L);
}
private void waitForDeletion() throws InterruptedException {
// Wait for more than DATANODE_COUNT replication intervals to ensure all
// the blocks pending deletion are sent for deletion to the datanodes.
Thread.sleep(DFS_REPLICATION_INTERVAL * (DATANODE_COUNT + 1) * 1000);
}
/**
* Wait for the named gauge value from the metrics source to reach the
* desired value.
*
* There's an initial delay then a spin cycle of sleep and poll. Because
* all the tests use a shared FS instance, these tests are not independent;
* that's why the initial sleep is in there.
*
* @param source metrics source
* @param name gauge name
* @param expected expected value
* @return the last metrics record polled
* @throws Exception if something went wrong.
*/
private MetricsRecordBuilder waitForDnMetricValue(String source,
String name,
long expected)
throws Exception {
MetricsRecordBuilder rb;
long gauge;
//initial wait.
waitForDeletion();
//lots of retries are allowed for slow systems; fast ones will still
//exit early
int retries = (DATANODE_COUNT + 1) * WAIT_GAUGE_VALUE_RETRIES;
rb = getMetrics(source);
gauge = MetricsAsserts.getLongGauge(name, rb);
while (gauge != expected && (--retries > 0)) {
Thread.sleep(DFS_REPLICATION_INTERVAL * 500);
rb = getMetrics(source);
gauge = MetricsAsserts.getLongGauge(name, rb);
}
//at this point the assertion is valid or the retry count ran out
assertGauge(name, expected, rb);
return rb;
}
@Test
public void testRenameMetrics() throws Exception {
Path src = getTestPath("src");
createFile(src, 100, (short)1);
Path target = getTestPath("target");
createFile(target, 100, (short)1);
fs.rename(src, target, Rename.OVERWRITE);
updateMetrics();
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
assertCounter("FilesRenamed", 1L, rb);
assertCounter("FilesDeleted", 1L, rb);
}
/**
* Test numGetBlockLocations metric
*
 * The test initiates and performs file operations (create, read, close,
 * open) which result in metrics changes. These metrics changes are
 * updated and tested for correctness.
 *
 * A create-file operation does not increment numGetBlockLocations;
 * each read-file operation increments numGetBlockLocations by 1.
*
* @throws IOException in case of an error
*/
@Test
public void testGetBlockLocationMetric() throws Exception {
Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "file1.dat");
    // When the cluster first starts there are no file operations
    // (read, create, open), so the GetBlockLocations metric should be 0.
assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
//Perform create file operation
createFile(file1_Path,100,(short)2);
updateMetrics();
//Create file does not change numGetBlockLocations metric
//expect numGetBlockLocations = 0 for previous and current interval
assertCounter("GetBlockLocations", 0L, getMetrics(NN_METRICS));
// Open and read file operation increments GetBlockLocations
// Perform read file operation on earlier created file
readFile(fs, file1_Path);
updateMetrics();
// Verify read file operation has incremented numGetBlockLocations by 1
assertCounter("GetBlockLocations", 1L, getMetrics(NN_METRICS));
// opening and reading file twice will increment numGetBlockLocations by 2
readFile(fs, file1_Path);
readFile(fs, file1_Path);
updateMetrics();
assertCounter("GetBlockLocations", 3L, getMetrics(NN_METRICS));
}
/**
* Testing TransactionsSinceLastCheckpoint. Need a new cluster as
* the other tests in here don't use HA. See HDFS-7501.
*/
@Test(timeout = 300000)
public void testTransactionSinceLastCheckpointMetrics() throws Exception {
Random random = new Random();
int retryCount = 0;
while (retryCount < 5) {
try {
int basePort = 10060 + random.nextInt(100) * 2;
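        // Use a randomized even base port so that a retry after a bind
        // conflict is likely to land on a free pair of HTTP ports.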
MiniDFSNNTopology topology = new MiniDFSNNTopology()
.addNameservice(new MiniDFSNNTopology.NSConf("ns1")
.addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(basePort))
.addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(basePort + 1)));
HdfsConfiguration conf2 = new HdfsConfiguration();
        // Lower the checkpoint threshold for the purpose of testing.
conf2.setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY,
100);
        // Check the checkpoint condition very often, for the purpose of testing.
conf2.setInt(
DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY,
1);
        // Poll and follow ANN txns very often, for the purpose of testing.
conf2.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf2)
.nnTopology(topology).numDataNodes(1).build();
cluster2.waitActive();
DistributedFileSystem fs2 = cluster2.getFileSystem(0);
NameNode nn0 = cluster2.getNameNode(0);
NameNode nn1 = cluster2.getNameNode(1);
cluster2.transitionToActive(0);
fs2.mkdirs(new Path("/tmp-t1"));
fs2.mkdirs(new Path("/tmp-t2"));
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
// Test to ensure tracking works before the first-ever
// checkpoint.
assertEquals("SBN failed to track 2 transactions pre-checkpoint.",
            4L, // 2 more txns are added by the catch-up call's log roll.
cluster2.getNameNode(1).getNamesystem()
.getTransactionsSinceLastCheckpoint());
// Complete up to the boundary required for
// an auto-checkpoint. Using 94 to expect fsimage
// rounded at 100, as 4 + 94 + 2 (catch-up call) = 100.
for (int i = 1; i <= 94; i++) {
fs2.mkdirs(new Path("/tmp-" + i));
}
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
// Assert 100 transactions in checkpoint.
HATestUtil.waitForCheckpoint(cluster2, 1, ImmutableList.of(100));
// Test to ensure number tracks the right state of
// uncheckpointed edits, and does not go negative
// (as fixed in HDFS-7501).
assertEquals("Should be zero right after the checkpoint.",
0L,
cluster2.getNameNode(1).getNamesystem()
.getTransactionsSinceLastCheckpoint());
fs2.mkdirs(new Path("/tmp-t3"));
fs2.mkdirs(new Path("/tmp-t4"));
HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
// Test to ensure we track the right numbers after
// the checkpoint resets it to zero again.
assertEquals("SBN failed to track 2 added txns after the ckpt.",
4L,
cluster2.getNameNode(1).getNamesystem()
.getTransactionsSinceLastCheckpoint());
cluster2.shutdown();
break;
} catch (Exception e) {
LOG.warn("Unable to set up HA cluster, exception thrown: " + e);
retryCount++;
}
}
}
/**
* Test NN checkpoint and transaction-related metrics.
*/
@Test
public void testTransactionAndCheckpointMetrics() throws Exception {
long lastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
fs.mkdirs(new Path(TEST_ROOT_DIR_PATH, "/tmp"));
updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 2L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 2L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 2L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().rollEditLog();
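    // Rolling the edit log finalizes the current segment and opens a new
    // one, which itself appends two transactions (OP_END_LOG_SEGMENT and
    // OP_START_LOG_SEGMENT); that is why the ids below advance by two.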
updateMetrics();
assertGauge("LastCheckpointTime", lastCkptTime, getMetrics(NS_METRICS));
assertGauge("LastWrittenTransactionId", 4L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 4L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNodeRpc().saveNamespace();
cluster.getNameNodeRpc().setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
updateMetrics();
long newLastCkptTime = MetricsAsserts.getLongGauge("LastCheckpointTime",
getMetrics(NS_METRICS));
assertTrue(lastCkptTime < newLastCkptTime);
assertGauge("LastWrittenTransactionId", 6L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastCheckpoint", 1L, getMetrics(NS_METRICS));
assertGauge("TransactionsSinceLastLogRoll", 1L, getMetrics(NS_METRICS));
}
/**
* Tests that the sync and block report metrics get updated on cluster
* startup.
*/
@Test
public void testSyncAndBlockReportMetric() throws Exception {
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
// We have one sync when the cluster starts up, just opening the journal
assertCounter("SyncsNumOps", 1L, rb);
// Each datanode reports in when the cluster comes up
assertCounter("BlockReportNumOps",
(long)DATANODE_COUNT * cluster.getStoragesPerDatanode(), rb);
// Sleep for an interval+slop to let the percentiles rollover
Thread.sleep((PERCENTILES_INTERVAL+1)*1000);
// Check that the percentiles were updated
assertQuantileGauges("Syncs1s", rb);
assertQuantileGauges("BlockReport1s", rb);
}
/**
* Test NN ReadOps Count and WriteOps Count
*/
@Test
public void testReadWriteOps() throws Exception {
MetricsRecordBuilder rb = getMetrics(NN_METRICS);
long startWriteCounter = MetricsAsserts.getLongCounter("TransactionsNumOps",
rb);
Path file1_Path = new Path(TEST_ROOT_DIR_PATH, "ReadData.dat");
//Perform create file operation
createFile(file1_Path, 1024 * 1024,(short)2);
// Perform read file operation on earlier created file
readFile(fs, file1_Path);
MetricsRecordBuilder rbNew = getMetrics(NN_METRICS);
assertTrue(MetricsAsserts.getLongCounter("TransactionsNumOps", rbNew) >
startWriteCounter);
}
/**
* Test metrics indicating the number of active clients and the files under
* construction
*/
@Test(timeout = 60000)
public void testNumActiveClientsAndFilesUnderConstructionMetrics()
throws Exception {
final Path file1 = getTestPath("testFileAdd1");
createFile(file1, 100, (short) 3);
assertGauge("NumActiveClients", 0L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 0L, getMetrics(NS_METRICS));
Path file2 = new Path("/testFileAdd2");
FSDataOutputStream output2 = fs.create(file2);
output2.writeBytes("Some test data");
assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 1L, getMetrics(NS_METRICS));
Path file3 = new Path("/testFileAdd3");
FSDataOutputStream output3 = fs.create(file3);
output3.writeBytes("Some test data");
assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 2L, getMetrics(NS_METRICS));
// create another DistributedFileSystem client
DistributedFileSystem fs1 = (DistributedFileSystem) cluster
.getNewFileSystemInstance(0);
try {
Path file4 = new Path("/testFileAdd4");
FSDataOutputStream output4 = fs1.create(file4);
output4.writeBytes("Some test data");
assertGauge("NumActiveClients", 2L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 3L, getMetrics(NS_METRICS));
Path file5 = new Path("/testFileAdd35");
FSDataOutputStream output5 = fs1.create(file5);
output5.writeBytes("Some test data");
assertGauge("NumActiveClients", 2L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 4L, getMetrics(NS_METRICS));
output2.close();
output3.close();
assertGauge("NumActiveClients", 1L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 2L, getMetrics(NS_METRICS));
output4.close();
output5.close();
assertGauge("NumActiveClients", 0L, getMetrics(NS_METRICS));
assertGauge("NumFilesUnderConstruction", 0L, getMetrics(NS_METRICS));
} finally {
fs1.close();
}
}
}
| 25,330
| 39.659711
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNNMetricFilesInGetListingOps.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.metrics;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import java.io.IOException;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test case for FilesInGetListingOps metric in Namenode
*/
public class TestNNMetricFilesInGetListingOps {
private static final Configuration CONF = new HdfsConfiguration();
private static final String NN_METRICS = "NameNodeActivity";
static {
CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 100);
CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 1);
CONF.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
CONF.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_INTERVAL_KEY, 1);
}
private MiniDFSCluster cluster;
private DistributedFileSystem fs;
private final Random rand = new Random();
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(CONF).build();
cluster.waitActive();
cluster.getNameNode();
fs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
cluster.shutdown();
}
/** create a file with a length of <code>fileLen</code> */
private void createFile(String fileName, long fileLen, short replicas) throws IOException {
Path filePath = new Path(fileName);
DFSTestUtil.createFile(fs, filePath, fileLen, replicas, rand.nextLong());
}
@Test
public void testFilesInGetListingOps() throws Exception {
createFile("/tmp1/t1", 3200, (short)3);
createFile("/tmp1/t2", 3200, (short)3);
createFile("/tmp2/t1", 3200, (short)3);
createFile("/tmp2/t2", 3200, (short)3);
cluster.getNameNodeRpc().getListing("/tmp1", HdfsFileStatus.EMPTY_NAME, false);
assertCounter("FilesInGetListingOps", 2L, getMetrics(NN_METRICS));
cluster.getNameNodeRpc().getListing("/tmp2", HdfsFileStatus.EMPTY_NAME, false);
assertCounter("FilesInGetListingOps", 4L, getMetrics(NN_METRICS));
}
}
| 3,255
| 36
| 93
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/StartupProgressTestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.*;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType.*;
/**
* Utility methods that help with writing tests covering startup progress.
*/
public class StartupProgressTestHelper {
/**
* Increments a counter a certain number of times.
*
* @param prog StartupProgress to increment
* @param phase Phase to increment
* @param step Step to increment
* @param delta long number of times to increment
*/
public static void incrementCounter(StartupProgress prog, Phase phase,
Step step, long delta) {
StartupProgress.Counter counter = prog.getCounter(phase, step);
for (long i = 0; i < delta; ++i) {
counter.increment();
}
}
/**
* Sets up StartupProgress to a state part-way through the startup sequence.
*
* @param prog StartupProgress to set
*/
public static void setStartupProgressForRunningState(StartupProgress prog) {
prog.beginPhase(LOADING_FSIMAGE);
Step loadingFsImageInodes = new Step(INODES);
prog.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
prog.setTotal(LOADING_FSIMAGE, loadingFsImageInodes, 100L);
incrementCounter(prog, LOADING_FSIMAGE, loadingFsImageInodes, 100L);
prog.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
prog.endPhase(LOADING_FSIMAGE);
prog.beginPhase(LOADING_EDITS);
Step loadingEditsFile = new Step("file", 1000L);
prog.beginStep(LOADING_EDITS, loadingEditsFile);
prog.setTotal(LOADING_EDITS, loadingEditsFile, 200L);
incrementCounter(prog, LOADING_EDITS, loadingEditsFile, 100L);
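    // LOADING_EDITS is deliberately left unfinished (no endStep/endPhase),
    // so a view created now reports startup as still running, with this
    // step at 100 of 200 units.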
}
/**
* Sets up StartupProgress to final state after startup sequence has completed.
*
* @param prog StartupProgress to set
*/
public static void setStartupProgressForFinalState(StartupProgress prog) {
prog.beginPhase(LOADING_FSIMAGE);
Step loadingFsImageInodes = new Step(INODES);
prog.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
prog.setTotal(LOADING_FSIMAGE, loadingFsImageInodes, 100L);
incrementCounter(prog, LOADING_FSIMAGE, loadingFsImageInodes, 100L);
prog.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
prog.endPhase(LOADING_FSIMAGE);
prog.beginPhase(LOADING_EDITS);
Step loadingEditsFile = new Step("file", 1000L);
prog.beginStep(LOADING_EDITS, loadingEditsFile);
prog.setTotal(LOADING_EDITS, loadingEditsFile, 200L);
incrementCounter(prog, LOADING_EDITS, loadingEditsFile, 200L);
prog.endStep(LOADING_EDITS, loadingEditsFile);
prog.endPhase(LOADING_EDITS);
prog.beginPhase(SAVING_CHECKPOINT);
Step savingCheckpointInodes = new Step(INODES);
prog.beginStep(SAVING_CHECKPOINT, savingCheckpointInodes);
prog.setTotal(SAVING_CHECKPOINT, savingCheckpointInodes, 300L);
incrementCounter(prog, SAVING_CHECKPOINT, savingCheckpointInodes, 300L);
prog.endStep(SAVING_CHECKPOINT, savingCheckpointInodes);
prog.endPhase(SAVING_CHECKPOINT);
prog.beginPhase(SAFEMODE);
Step awaitingBlocks = new Step(AWAITING_REPORTED_BLOCKS);
prog.beginStep(SAFEMODE, awaitingBlocks);
prog.setTotal(SAFEMODE, awaitingBlocks, 400L);
incrementCounter(prog, SAFEMODE, awaitingBlocks, 400L);
prog.endStep(SAFEMODE, awaitingBlocks);
prog.endPhase(SAFEMODE);
}
}
| 4,190
| 39.298077
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgress.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.*;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Status.*;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType.*;
import static org.junit.Assert.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import org.junit.Before;
import org.junit.Test;
public class TestStartupProgress {
private StartupProgress startupProgress;
@Before
public void setUp() {
startupProgress = new StartupProgress();
}
@Test(timeout=10000)
public void testCounter() {
startupProgress.beginPhase(LOADING_FSIMAGE);
Step loadingFsImageInodes = new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
incrementCounter(startupProgress, LOADING_FSIMAGE, loadingFsImageInodes,
100L);
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
Step loadingFsImageDelegationKeys = new Step(DELEGATION_KEYS);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
incrementCounter(startupProgress, LOADING_FSIMAGE,
loadingFsImageDelegationKeys, 200L);
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.beginPhase(LOADING_EDITS);
Step loadingEditsFile = new Step("file", 1000L);
startupProgress.beginStep(LOADING_EDITS, loadingEditsFile);
incrementCounter(startupProgress, LOADING_EDITS, loadingEditsFile, 5000L);
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
assertEquals(100L, view.getCount(LOADING_FSIMAGE, loadingFsImageInodes));
assertEquals(200L, view.getCount(LOADING_FSIMAGE,
loadingFsImageDelegationKeys));
assertEquals(5000L, view.getCount(LOADING_EDITS, loadingEditsFile));
assertEquals(0L, view.getCount(SAVING_CHECKPOINT,
new Step(INODES)));
// Increment a counter again and check that the existing view was not
// modified, but a new view shows the updated value.
incrementCounter(startupProgress, LOADING_EDITS, loadingEditsFile, 1000L);
startupProgress.endStep(LOADING_EDITS, loadingEditsFile);
startupProgress.endPhase(LOADING_EDITS);
assertEquals(5000L, view.getCount(LOADING_EDITS, loadingEditsFile));
view = startupProgress.createView();
assertNotNull(view);
assertEquals(6000L, view.getCount(LOADING_EDITS, loadingEditsFile));
}
@Test(timeout=10000)
public void testElapsedTime() throws Exception {
startupProgress.beginPhase(LOADING_FSIMAGE);
Step loadingFsImageInodes = new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
Thread.sleep(50L); // brief sleep to fake elapsed time
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
Step loadingFsImageDelegationKeys = new Step(DELEGATION_KEYS);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
Thread.sleep(50L); // brief sleep to fake elapsed time
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.beginPhase(LOADING_EDITS);
Step loadingEditsFile = new Step("file", 1000L);
startupProgress.beginStep(LOADING_EDITS, loadingEditsFile);
startupProgress.setTotal(LOADING_EDITS, loadingEditsFile, 10000L);
incrementCounter(startupProgress, LOADING_EDITS, loadingEditsFile, 5000L);
Thread.sleep(50L); // brief sleep to fake elapsed time
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
assertTrue(view.getElapsedTime() > 0);
assertTrue(view.getElapsedTime(LOADING_FSIMAGE) > 0);
assertTrue(view.getElapsedTime(LOADING_FSIMAGE,
loadingFsImageInodes) > 0);
assertTrue(view.getElapsedTime(LOADING_FSIMAGE,
loadingFsImageDelegationKeys) > 0);
assertTrue(view.getElapsedTime(LOADING_EDITS) > 0);
assertTrue(view.getElapsedTime(LOADING_EDITS, loadingEditsFile) > 0);
    assertEquals(0, view.getElapsedTime(SAVING_CHECKPOINT));
    assertEquals(0, view.getElapsedTime(SAVING_CHECKPOINT, new Step(INODES)));
// Brief sleep, then check that completed phases/steps have the same elapsed
// time, but running phases/steps have updated elapsed time.
long totalTime = view.getElapsedTime();
long loadingFsImageTime = view.getElapsedTime(LOADING_FSIMAGE);
long loadingFsImageInodesTime = view.getElapsedTime(LOADING_FSIMAGE,
loadingFsImageInodes);
    long loadingFsImageDelegationKeysTime = view.getElapsedTime(LOADING_FSIMAGE,
        loadingFsImageDelegationKeys);
long loadingEditsTime = view.getElapsedTime(LOADING_EDITS);
long loadingEditsFileTime = view.getElapsedTime(LOADING_EDITS,
loadingEditsFile);
Thread.sleep(50L);
assertTrue(totalTime < view.getElapsedTime());
assertEquals(loadingFsImageTime, view.getElapsedTime(LOADING_FSIMAGE));
assertEquals(loadingFsImageInodesTime, view.getElapsedTime(LOADING_FSIMAGE,
loadingFsImageInodes));
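    // The delegation-keys step also completed before the sleep, so its
    // captured elapsed time should be unchanged as well.
    assertEquals(loadingFsImageDelegationKeysTime, view.getElapsedTime(
        LOADING_FSIMAGE, loadingFsImageDelegationKeys));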
assertTrue(loadingEditsTime < view.getElapsedTime(LOADING_EDITS));
assertTrue(loadingEditsFileTime < view.getElapsedTime(LOADING_EDITS,
loadingEditsFile));
}
@Test(timeout=10000)
public void testFrozenAfterStartupCompletes() {
// Do some updates and counter increments.
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.setFile(LOADING_FSIMAGE, "file1");
startupProgress.setSize(LOADING_FSIMAGE, 1000L);
Step step = new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE, step);
startupProgress.setTotal(LOADING_FSIMAGE, step, 10000L);
incrementCounter(startupProgress, LOADING_FSIMAGE, step, 100L);
startupProgress.endStep(LOADING_FSIMAGE, step);
startupProgress.endPhase(LOADING_FSIMAGE);
    // Force completion of all phases, so that the entire startup process is
    // completed.
for (Phase phase: EnumSet.allOf(Phase.class)) {
if (startupProgress.getStatus(phase) != Status.COMPLETE) {
startupProgress.beginPhase(phase);
startupProgress.endPhase(phase);
}
}
StartupProgressView before = startupProgress.createView();
// Attempt more updates and counter increments.
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.setFile(LOADING_FSIMAGE, "file2");
startupProgress.setSize(LOADING_FSIMAGE, 2000L);
startupProgress.beginStep(LOADING_FSIMAGE, step);
startupProgress.setTotal(LOADING_FSIMAGE, step, 20000L);
incrementCounter(startupProgress, LOADING_FSIMAGE, step, 100L);
startupProgress.endStep(LOADING_FSIMAGE, step);
startupProgress.endPhase(LOADING_FSIMAGE);
// Also attempt a whole new step that wasn't used last time.
startupProgress.beginPhase(LOADING_EDITS);
Step newStep = new Step("file1");
startupProgress.beginStep(LOADING_EDITS, newStep);
incrementCounter(startupProgress, LOADING_EDITS, newStep, 100L);
startupProgress.endStep(LOADING_EDITS, newStep);
startupProgress.endPhase(LOADING_EDITS);
StartupProgressView after = startupProgress.createView();
    // Expect that data was frozen after completion of the entire startup
    // process, so the second set of updates and counter increments should
    // have had no effect.
assertEquals(before.getCount(LOADING_FSIMAGE),
after.getCount(LOADING_FSIMAGE));
assertEquals(before.getCount(LOADING_FSIMAGE, step),
after.getCount(LOADING_FSIMAGE, step));
assertEquals(before.getElapsedTime(), after.getElapsedTime());
assertEquals(before.getElapsedTime(LOADING_FSIMAGE),
after.getElapsedTime(LOADING_FSIMAGE));
assertEquals(before.getElapsedTime(LOADING_FSIMAGE, step),
after.getElapsedTime(LOADING_FSIMAGE, step));
assertEquals(before.getFile(LOADING_FSIMAGE),
after.getFile(LOADING_FSIMAGE));
assertEquals(before.getSize(LOADING_FSIMAGE),
after.getSize(LOADING_FSIMAGE));
assertEquals(before.getTotal(LOADING_FSIMAGE),
after.getTotal(LOADING_FSIMAGE));
assertEquals(before.getTotal(LOADING_FSIMAGE, step),
after.getTotal(LOADING_FSIMAGE, step));
assertFalse(after.getSteps(LOADING_EDITS).iterator().hasNext());
}
@Test(timeout=10000)
public void testInitialState() {
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
assertEquals(0L, view.getElapsedTime());
assertEquals(0.0f, view.getPercentComplete(), 0.001f);
List<Phase> phases = new ArrayList<Phase>();
for (Phase phase: view.getPhases()) {
phases.add(phase);
assertEquals(0L, view.getElapsedTime(phase));
assertNull(view.getFile(phase));
assertEquals(0.0f, view.getPercentComplete(phase), 0.001f);
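      // In the initial state, size defaults to Long.MIN_VALUE, marking it as
      // not yet set.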
assertEquals(Long.MIN_VALUE, view.getSize(phase));
assertEquals(PENDING, view.getStatus(phase));
assertEquals(0L, view.getTotal(phase));
for (Step step: view.getSteps(phase)) {
fail(String.format("unexpected step %s in phase %s at initial state",
step, phase));
}
}
assertArrayEquals(EnumSet.allOf(Phase.class).toArray(), phases.toArray());
}
@Test(timeout=10000)
public void testPercentComplete() {
startupProgress.beginPhase(LOADING_FSIMAGE);
Step loadingFsImageInodes = new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
startupProgress.setTotal(LOADING_FSIMAGE, loadingFsImageInodes, 1000L);
incrementCounter(startupProgress, LOADING_FSIMAGE, loadingFsImageInodes,
100L);
Step loadingFsImageDelegationKeys = new Step(DELEGATION_KEYS);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
startupProgress.setTotal(LOADING_FSIMAGE, loadingFsImageDelegationKeys,
800L);
incrementCounter(startupProgress, LOADING_FSIMAGE,
loadingFsImageDelegationKeys, 200L);
startupProgress.beginPhase(LOADING_EDITS);
Step loadingEditsFile = new Step("file", 1000L);
startupProgress.beginStep(LOADING_EDITS, loadingEditsFile);
startupProgress.setTotal(LOADING_EDITS, loadingEditsFile, 10000L);
incrementCounter(startupProgress, LOADING_EDITS, loadingEditsFile, 5000L);
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
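    // Expected arithmetic, assuming a phase's percent is the sum of its step
    // counts over the sum of its step totals and the overall percent averages
    // all four phases: LOADING_FSIMAGE = (100 + 200) / (1000 + 800) = 0.167,
    // LOADING_EDITS = 5000 / 10000 = 0.5, overall = (0.167 + 0.5) / 4 = 0.167.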
assertEquals(0.167f, view.getPercentComplete(), 0.001f);
assertEquals(0.167f, view.getPercentComplete(LOADING_FSIMAGE), 0.001f);
assertEquals(0.10f, view.getPercentComplete(LOADING_FSIMAGE,
loadingFsImageInodes), 0.001f);
assertEquals(0.25f, view.getPercentComplete(LOADING_FSIMAGE,
loadingFsImageDelegationKeys), 0.001f);
assertEquals(0.5f, view.getPercentComplete(LOADING_EDITS), 0.001f);
assertEquals(0.5f, view.getPercentComplete(LOADING_EDITS, loadingEditsFile),
0.001f);
assertEquals(0.0f, view.getPercentComplete(SAVING_CHECKPOINT), 0.001f);
assertEquals(0.0f, view.getPercentComplete(SAVING_CHECKPOINT,
new Step(INODES)), 0.001f);
// End steps/phases, and confirm that they jump to 100% completion.
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.endStep(LOADING_EDITS, loadingEditsFile);
startupProgress.endPhase(LOADING_EDITS);
view = startupProgress.createView();
assertNotNull(view);
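    // Both started phases now report 1.0, so the overall percent is
    // (1.0 + 1.0 + 0 + 0) / 4 = 0.5.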
assertEquals(0.5f, view.getPercentComplete(), 0.001f);
assertEquals(1.0f, view.getPercentComplete(LOADING_FSIMAGE), 0.001f);
assertEquals(1.0f, view.getPercentComplete(LOADING_FSIMAGE,
loadingFsImageInodes), 0.001f);
assertEquals(1.0f, view.getPercentComplete(LOADING_FSIMAGE,
loadingFsImageDelegationKeys), 0.001f);
assertEquals(1.0f, view.getPercentComplete(LOADING_EDITS), 0.001f);
assertEquals(1.0f, view.getPercentComplete(LOADING_EDITS, loadingEditsFile),
0.001f);
assertEquals(0.0f, view.getPercentComplete(SAVING_CHECKPOINT), 0.001f);
assertEquals(0.0f, view.getPercentComplete(SAVING_CHECKPOINT,
new Step(INODES)), 0.001f);
}
@Test(timeout=10000)
public void testStatus() {
startupProgress.beginPhase(LOADING_FSIMAGE);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.beginPhase(LOADING_EDITS);
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
assertEquals(COMPLETE, view.getStatus(LOADING_FSIMAGE));
assertEquals(RUNNING, view.getStatus(LOADING_EDITS));
assertEquals(PENDING, view.getStatus(SAVING_CHECKPOINT));
}
@Test(timeout=10000)
public void testStepSequence() {
// Test that steps are returned in the correct sort order (by file and then
// sequence number) by starting a few steps in a randomly shuffled order and
// then asserting that they are returned in the expected order.
Step[] expectedSteps = new Step[] {
new Step(INODES, "file1"),
new Step(DELEGATION_KEYS, "file1"),
new Step(INODES, "file2"),
new Step(DELEGATION_KEYS, "file2"),
new Step(INODES, "file3"),
new Step(DELEGATION_KEYS, "file3")
};
List<Step> shuffledSteps = new ArrayList<Step>(Arrays.asList(expectedSteps));
Collections.shuffle(shuffledSteps);
startupProgress.beginPhase(SAVING_CHECKPOINT);
for (Step step: shuffledSteps) {
startupProgress.beginStep(SAVING_CHECKPOINT, step);
}
List<Step> actualSteps = new ArrayList<Step>(expectedSteps.length);
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
for (Step step: view.getSteps(SAVING_CHECKPOINT)) {
actualSteps.add(step);
}
assertArrayEquals(expectedSteps, actualSteps.toArray());
}
@Test(timeout=10000)
public void testThreadSafety() throws Exception {
// Test for thread safety by starting multiple threads that mutate the same
// StartupProgress instance in various ways. We expect no internal
// corruption of data structures and no lost updates on counter increments.
int numThreads = 100;
// Data tables used by each thread to determine values to pass to APIs.
Phase[] phases = { LOADING_FSIMAGE, LOADING_FSIMAGE, LOADING_EDITS,
LOADING_EDITS };
Step[] steps = new Step[] { new Step(INODES), new Step(DELEGATION_KEYS),
new Step(INODES), new Step(DELEGATION_KEYS) };
String[] files = { "file1", "file1", "file2", "file2" };
long[] sizes = { 1000L, 1000L, 2000L, 2000L };
long[] totals = { 10000L, 20000L, 30000L, 40000L };
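    // With 100 threads cycling through 4 (phase, step) combinations, each
    // combination is exercised by 25 threads, and each thread increments its
    // counter by 100, so every count asserted below should be 25 * 100 = 2500.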
ExecutorService exec = Executors.newFixedThreadPool(numThreads);
try {
for (int i = 0; i < numThreads; ++i) {
final Phase phase = phases[i % phases.length];
final Step step = steps[i % steps.length];
final String file = files[i % files.length];
final long size = sizes[i % sizes.length];
final long total = totals[i % totals.length];
exec.submit(new Callable<Void>() {
@Override
public Void call() {
startupProgress.beginPhase(phase);
startupProgress.setFile(phase, file);
startupProgress.setSize(phase, size);
startupProgress.setTotal(phase, step, total);
incrementCounter(startupProgress, phase, step, 100L);
startupProgress.endStep(phase, step);
startupProgress.endPhase(phase);
return null;
}
});
}
} finally {
exec.shutdown();
assertTrue(exec.awaitTermination(10000L, TimeUnit.MILLISECONDS));
}
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
assertEquals("file1", view.getFile(LOADING_FSIMAGE));
assertEquals(1000L, view.getSize(LOADING_FSIMAGE));
assertEquals(10000L, view.getTotal(LOADING_FSIMAGE, new Step(INODES)));
assertEquals(2500L, view.getCount(LOADING_FSIMAGE, new Step(INODES)));
assertEquals(20000L, view.getTotal(LOADING_FSIMAGE,
new Step(DELEGATION_KEYS)));
assertEquals(2500L, view.getCount(LOADING_FSIMAGE,
new Step(DELEGATION_KEYS)));
assertEquals("file2", view.getFile(LOADING_EDITS));
assertEquals(2000L, view.getSize(LOADING_EDITS));
assertEquals(30000L, view.getTotal(LOADING_EDITS, new Step(INODES)));
assertEquals(2500L, view.getCount(LOADING_EDITS, new Step(INODES)));
assertEquals(40000L, view.getTotal(LOADING_EDITS,
new Step(DELEGATION_KEYS)));
assertEquals(2500L, view.getCount(LOADING_EDITS, new Step(DELEGATION_KEYS)));
}
@Test(timeout=10000)
public void testTotal() {
startupProgress.beginPhase(LOADING_FSIMAGE);
Step loadingFsImageInodes = new Step(INODES);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageInodes);
startupProgress.setTotal(LOADING_FSIMAGE, loadingFsImageInodes, 1000L);
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageInodes);
Step loadingFsImageDelegationKeys = new Step(DELEGATION_KEYS);
startupProgress.beginStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
startupProgress.setTotal(LOADING_FSIMAGE, loadingFsImageDelegationKeys,
800L);
startupProgress.endStep(LOADING_FSIMAGE, loadingFsImageDelegationKeys);
startupProgress.endPhase(LOADING_FSIMAGE);
startupProgress.beginPhase(LOADING_EDITS);
Step loadingEditsFile = new Step("file", 1000L);
startupProgress.beginStep(LOADING_EDITS, loadingEditsFile);
startupProgress.setTotal(LOADING_EDITS, loadingEditsFile, 10000L);
startupProgress.endStep(LOADING_EDITS, loadingEditsFile);
startupProgress.endPhase(LOADING_EDITS);
StartupProgressView view = startupProgress.createView();
assertNotNull(view);
assertEquals(1000L, view.getTotal(LOADING_FSIMAGE, loadingFsImageInodes));
assertEquals(800L, view.getTotal(LOADING_FSIMAGE,
loadingFsImageDelegationKeys));
assertEquals(10000L, view.getTotal(LOADING_EDITS, loadingEditsFile));
}
}
| 19,133
| 42.784897
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/startupprogress/TestStartupProgressMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.startupprogress;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase.*;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgressTestHelper.*;
import static org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.junit.Assert.*;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Before;
import org.junit.Test;
public class TestStartupProgressMetrics {
private StartupProgress startupProgress;
private StartupProgressMetrics metrics;
@Before
public void setUp() {
mockMetricsSystem();
startupProgress = new StartupProgress();
metrics = new StartupProgressMetrics(startupProgress);
}
@Test
public void testInitialState() {
MetricsRecordBuilder builder = getMetrics(metrics, true);
assertCounter("ElapsedTime", 0L, builder);
assertGauge("PercentComplete", 0.0f, builder);
assertCounter("LoadingFsImageCount", 0L, builder);
assertCounter("LoadingFsImageElapsedTime", 0L, builder);
assertCounter("LoadingFsImageTotal", 0L, builder);
assertGauge("LoadingFsImagePercentComplete", 0.0f, builder);
assertCounter("LoadingEditsCount", 0L, builder);
assertCounter("LoadingEditsElapsedTime", 0L, builder);
assertCounter("LoadingEditsTotal", 0L, builder);
assertGauge("LoadingEditsPercentComplete", 0.0f, builder);
assertCounter("SavingCheckpointCount", 0L, builder);
assertCounter("SavingCheckpointElapsedTime", 0L, builder);
assertCounter("SavingCheckpointTotal", 0L, builder);
assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
assertCounter("SafeModeCount", 0L, builder);
assertCounter("SafeModeElapsedTime", 0L, builder);
assertCounter("SafeModeTotal", 0L, builder);
assertGauge("SafeModePercentComplete", 0.0f, builder);
}
@Test
public void testRunningState() {
setStartupProgressForRunningState(startupProgress);
MetricsRecordBuilder builder = getMetrics(metrics, true);
assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
assertGauge("PercentComplete", 0.375f, builder);
assertCounter("LoadingFsImageCount", 100L, builder);
assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
assertCounter("LoadingFsImageTotal", 100L, builder);
assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
assertCounter("LoadingEditsCount", 100L, builder);
assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
assertCounter("LoadingEditsTotal", 200L, builder);
assertGauge("LoadingEditsPercentComplete", 0.5f, builder);
assertCounter("SavingCheckpointCount", 0L, builder);
assertCounter("SavingCheckpointElapsedTime", 0L, builder);
assertCounter("SavingCheckpointTotal", 0L, builder);
assertGauge("SavingCheckpointPercentComplete", 0.0f, builder);
assertCounter("SafeModeCount", 0L, builder);
assertCounter("SafeModeElapsedTime", 0L, builder);
assertCounter("SafeModeTotal", 0L, builder);
assertGauge("SafeModePercentComplete", 0.0f, builder);
}
@Test
public void testFinalState() {
setStartupProgressForFinalState(startupProgress);
MetricsRecordBuilder builder = getMetrics(metrics, true);
assertTrue(getLongCounter("ElapsedTime", builder) >= 0L);
assertGauge("PercentComplete", 1.0f, builder);
assertCounter("LoadingFsImageCount", 100L, builder);
assertTrue(getLongCounter("LoadingFsImageElapsedTime", builder) >= 0L);
assertCounter("LoadingFsImageTotal", 100L, builder);
assertGauge("LoadingFsImagePercentComplete", 1.0f, builder);
assertCounter("LoadingEditsCount", 200L, builder);
assertTrue(getLongCounter("LoadingEditsElapsedTime", builder) >= 0L);
assertCounter("LoadingEditsTotal", 200L, builder);
assertGauge("LoadingEditsPercentComplete", 1.0f, builder);
assertCounter("SavingCheckpointCount", 300L, builder);
assertTrue(getLongCounter("SavingCheckpointElapsedTime", builder) >= 0L);
assertCounter("SavingCheckpointTotal", 300L, builder);
assertGauge("SavingCheckpointPercentComplete", 1.0f, builder);
assertCounter("SafeModeCount", 400L, builder);
assertTrue(getLongCounter("SafeModeElapsedTime", builder) >= 0L);
assertCounter("SafeModeTotal", 400L, builder);
assertGauge("SafeModePercentComplete", 1.0f, builder);
}
}
| 5,262
| 44.765217
| 97
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature.SNAPSHOT_LIMIT;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.Random;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/** Testing nested snapshots. */
public class TestNestedSnapshots {
static {
// These tests generate a large number of edits, and repeated edit log
// flushes can be a bottleneck.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
{
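    // Silence the verbose snapshot-related loggers for these tests.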
SnapshotTestHelper.disableLogs();
}
private static final long SEED = 0;
private static final Random RANDOM = new Random(SEED);
private static final short REPLICATION = 3;
private static final long BLOCKSIZE = 1024;
private static final Configuration conf = new Configuration();
private static MiniDFSCluster cluster;
private static DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Create a snapshot for /test/foo and create another snapshot for
* /test/foo/bar. Files created before the snapshots should appear in both
* snapshots and the files created after the snapshots should not appear in
* any of the snapshots.
*/
@Test (timeout=300000)
public void testNestedSnapshots() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
final Path foo = new Path("/testNestedSnapshots/foo");
final Path bar = new Path(foo, "bar");
final Path file1 = new Path(bar, "file1");
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
print("create file " + file1);
final String s1name = "foo-s1";
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, s1name);
hdfs.allowSnapshot(foo);
print("allow snapshot " + foo);
hdfs.createSnapshot(foo, s1name);
print("create snapshot " + s1name);
final String s2name = "bar-s2";
final Path s2path = SnapshotTestHelper.getSnapshotRoot(bar, s2name);
hdfs.allowSnapshot(bar);
print("allow snapshot " + bar);
hdfs.createSnapshot(bar, s2name);
print("create snapshot " + s2name);
final Path file2 = new Path(bar, "file2");
DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, SEED);
print("create file " + file2);
assertFile(s1path, s2path, file1, true, true, true);
assertFile(s1path, s2path, file2, true, false, false);
//test root
final String rootStr = "/";
final Path rootPath = new Path(rootStr);
hdfs.allowSnapshot(rootPath);
print("allow snapshot " + rootStr);
final Path rootSnapshot = hdfs.createSnapshot(rootPath);
print("create snapshot " + rootSnapshot);
hdfs.deleteSnapshot(rootPath, rootSnapshot.getName());
print("delete snapshot " + rootSnapshot);
hdfs.disallowSnapshot(rootPath);
print("disallow snapshot " + rootStr);
//change foo to non-snapshottable
hdfs.deleteSnapshot(foo, s1name);
hdfs.disallowSnapshot(foo);
//test disallow nested snapshots
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(false);
try {
hdfs.allowSnapshot(rootPath);
Assert.fail();
} catch (SnapshotException se) {
assertNestedSnapshotException(
se, "subdirectory");
}
try {
hdfs.allowSnapshot(foo);
Assert.fail();
} catch (SnapshotException se) {
assertNestedSnapshotException(
se, "subdirectory");
}
final Path sub1Bar = new Path(bar, "sub1");
final Path sub2Bar = new Path(sub1Bar, "sub2");
hdfs.mkdirs(sub2Bar);
try {
hdfs.allowSnapshot(sub1Bar);
Assert.fail();
} catch (SnapshotException se) {
assertNestedSnapshotException(
se, "ancestor");
}
try {
hdfs.allowSnapshot(sub2Bar);
Assert.fail();
} catch (SnapshotException se) {
assertNestedSnapshotException(
se, "ancestor");
}
}
static void assertNestedSnapshotException(SnapshotException se, String substring) {
Assert.assertTrue(se.getMessage().startsWith(
"Nested snapshottable directories not allowed"));
Assert.assertTrue(se.getMessage().contains(substring));
}
private static void print(String message) throws UnresolvedLinkException {
SnapshotTestHelper.dumpTree(message, cluster);
}
private static void assertFile(Path s1, Path s2, Path file,
Boolean... expected) throws IOException {
final Path[] paths = {
file,
new Path(s1, "bar/" + file.getName()),
new Path(s2, file.getName())
};
Assert.assertEquals(expected.length, paths.length);
for(int i = 0; i < paths.length; i++) {
final boolean computed = hdfs.exists(paths[i]);
Assert.assertEquals("Failed on " + paths[i], expected[i], computed);
}
}
/**
* Test the snapshot limit of a single snapshottable directory.
* @throws Exception
*/
@Test (timeout=300000)
public void testSnapshotLimit() throws Exception {
final int step = 1000;
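    // SNAPSHOT_LIMIT is the per-directory cap on snapshots (1 << 16 at the
    // time of writing), so this loop creates the maximum allowed number
    // before expecting the next createSnapshot call to fail.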
final String dirStr = "/testSnapshotLimit/dir";
final Path dir = new Path(dirStr);
hdfs.mkdirs(dir, new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
int s = 0;
for(; s < SNAPSHOT_LIMIT; s++) {
final String snapshotName = "s" + s;
hdfs.createSnapshot(dir, snapshotName);
//create a file occasionally
if (s % step == 0) {
final Path file = new Path(dirStr, "f" + s);
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
}
}
try {
hdfs.createSnapshot(dir, "s" + s);
Assert.fail("Expected to fail to create snapshot, but didn't.");
} catch(IOException ioe) {
SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
}
for(int f = 0; f < SNAPSHOT_LIMIT; f += step) {
final String file = "f" + f;
s = RANDOM.nextInt(step);
for(; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step)) {
final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
//the file #f exists in snapshot #s iff s > f.
Assert.assertEquals(s > f, hdfs.exists(p));
}
}
}
@Test (timeout=300000)
public void testSnapshotName() throws Exception {
final String dirStr = "/testSnapshotWithQuota/dir";
final Path dir = new Path(dirStr);
hdfs.mkdirs(dir, new FsPermission((short)0777));
hdfs.allowSnapshot(dir);
// set namespace quota
final int NS_QUOTA = 6;
hdfs.setQuota(dir, NS_QUOTA, HdfsConstants.QUOTA_DONT_SET);
// create object to use up the quota.
final Path foo = new Path(dir, "foo");
final Path f1 = new Path(foo, "f1");
DFSTestUtil.createFile(hdfs, f1, BLOCKSIZE, REPLICATION, SEED);
{
//create a snapshot with default snapshot name
final Path snapshotPath = hdfs.createSnapshot(dir);
//check snapshot path and the default snapshot name
final String snapshotName = snapshotPath.getName();
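      // The default name is derived from a timestamp, hence the
      // s'yyyyMMdd-HHmmss.SSS' pattern checked below.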
Assert.assertTrue("snapshotName=" + snapshotName, Pattern.matches(
"s\\d\\d\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d\\d\\d\\.\\d\\d\\d",
snapshotName));
final Path parent = snapshotPath.getParent();
Assert.assertEquals(HdfsConstants.DOT_SNAPSHOT_DIR, parent.getName());
Assert.assertEquals(dir, parent.getParent());
}
}
/**
* Test {@link Snapshot#ID_COMPARATOR}.
*/
@Test (timeout=300000)
public void testIdCmp() {
final PermissionStatus perm = PermissionStatus.createImmutable(
"user", "group", FsPermission.createImmutable((short)0));
final INodeDirectory snapshottable = new INodeDirectory(0,
DFSUtil.string2Bytes("foo"), perm, 0L);
snapshottable.addSnapshottableFeature();
final Snapshot[] snapshots = {
new Snapshot(1, "s1", snapshottable),
new Snapshot(1, "s1", snapshottable),
new Snapshot(2, "s2", snapshottable),
new Snapshot(2, "s2", snapshottable),
};
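    // Snapshot.ID_COMPARATOR orders snapshots by id and treats null as the
    // greatest value, as the assertions below verify.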
Assert.assertEquals(0, Snapshot.ID_COMPARATOR.compare(null, null));
for(Snapshot s : snapshots) {
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(null, s) > 0);
Assert.assertTrue(Snapshot.ID_COMPARATOR.compare(s, null) < 0);
for(Snapshot t : snapshots) {
final int expected = s.getRoot().getLocalName().compareTo(
t.getRoot().getLocalName());
final int computed = Snapshot.ID_COMPARATOR.compare(s, t);
Assert.assertEquals(expected > 0, computed > 0);
Assert.assertEquals(expected == 0, computed == 0);
Assert.assertEquals(expected < 0, computed < 0);
}
}
}
  /**
   * When we have nested snapshottable directories and we try to reset a
   * snapshottable descendant back to a regular directory, we need to replace
   * the snapshottable descendant with an INodeDirectoryWithSnapshot.
   */
@Test
public void testDisallowNestedSnapshottableDir() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
final Path dir = new Path("/dir");
final Path sub = new Path(dir, "sub");
hdfs.mkdirs(sub);
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
final Path file = new Path(sub, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
INode subNode = fsdir.getINode(sub.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
hdfs.allowSnapshot(sub);
subNode = fsdir.getINode(sub.toString());
assertTrue(subNode.isDirectory() && subNode.asDirectory().isSnapshottable());
hdfs.disallowSnapshot(sub);
subNode = fsdir.getINode(sub.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
}
}
| 11,770
| 34.454819
| 107
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestINodeFileUnderConstructionWithSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClientAdapter;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Test snapshot functionality while appending to files.
 */
public class TestINodeFileUnderConstructionWithSnapshot {
{
((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
SnapshotTestHelper.disableLogs();
}
static final long seed = 0;
static final short REPLICATION = 3;
static final int BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
FSDirectory fsdir;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
  /**
   * Test snapshots taken after file appends.
   */
@Test (timeout=60000)
public void testSnapshotAfterAppending() throws Exception {
Path file = new Path(dir, "file");
// 1. create snapshot --> create file --> append
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
// 2. create snapshot --> modify the file --> append
hdfs.createSnapshot(dir, "s1");
hdfs.setReplication(file, (short) (REPLICATION - 1));
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
// check corresponding inodes
fileNode = (INodeFile) fsdir.getINode(file.toString());
assertEquals(REPLICATION - 1, fileNode.getFileReplication());
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize());
// 3. create snapshot --> append
hdfs.createSnapshot(dir, "s2");
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE);
// check corresponding inodes
fileNode = (INodeFile) fsdir.getINode(file.toString());
assertEquals(REPLICATION - 1, fileNode.getFileReplication());
assertEquals(BLOCKSIZE * 4, fileNode.computeFileSize());
}
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
throws IOException {
byte[] toAppend = new byte[length];
Random random = new Random();
random.nextBytes(toAppend);
HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
out.write(toAppend);
return out;
}
/**
* Test snapshot during file appending, before the corresponding
* {@link FSDataOutputStream} instance closes.
*/
@Test (timeout=60000)
public void testSnapshotWhileAppending() throws Exception {
Path file = new Path(dir, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
// 1. append without closing stream --> create snapshot
HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
out.close();
// check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
// deleted list, with size BLOCKSIZE*2
INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
DirectoryDiff last = dirNode.getDiffs().getLast();
// 2. append without closing stream
out = appendFileWithoutClosing(file, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
// re-check nodeInDeleted_S0
dirNode = fsdir.getINode(dir.toString()).asDirectory();
assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
// 3. take snapshot --> close stream
hdfs.createSnapshot(dir, "s1");
out.close();
// check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
// have been stored in s1's deleted list
fileNode = (INodeFile) fsdir.getINode(file.toString());
dirNode = fsdir.getINode(dir.toString()).asDirectory();
last = dirNode.getDiffs().getLast();
assertTrue(fileNode.isWithSnapshot());
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
// 4. modify file --> append without closing stream --> take snapshot -->
// close stream
hdfs.setReplication(file, (short) (REPLICATION - 1));
out = appendFileWithoutClosing(file, BLOCKSIZE);
hdfs.createSnapshot(dir, "s2");
out.close();
// re-check the size of nodeInDeleted_S1
assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
}
  /**
   * Call DFSClient#callGetBlockLocations(...) for a snapshot file. Make sure
   * only blocks within the size range are returned.
   */
@Test
public void testGetBlockLocations() throws Exception {
final Path root = new Path("/");
final Path file = new Path("/file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
// take a snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
final Path fileInSnapshot = SnapshotTestHelper.getSnapshotPath(root,
"s1", file.getName());
FileStatus status = hdfs.getFileStatus(fileInSnapshot);
// make sure we record the size for the file
assertEquals(BLOCKSIZE, status.getLen());
// append data to file
DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE - 1);
status = hdfs.getFileStatus(fileInSnapshot);
// the size of snapshot file should still be BLOCKSIZE
assertEquals(BLOCKSIZE, status.getLen());
// the size of the file should be (2 * BLOCKSIZE - 1)
status = hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
// call DFSClient#callGetBlockLocations for the file in snapshot
LocatedBlocks blocks = DFSClientAdapter.callGetBlockLocations(
cluster.getNameNodeRpc(), fileInSnapshot.toString(), 0, Long.MAX_VALUE);
List<LocatedBlock> blockList = blocks.getLocatedBlocks();
// should be only one block
assertEquals(BLOCKSIZE, blocks.getFileLength());
assertEquals(1, blockList.size());
// check the last block
LocatedBlock lastBlock = blocks.getLastLocatedBlock();
assertEquals(0, lastBlock.getStartOffset());
assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
// take another snapshot
SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root,
"s2", file.getName());
// append data to file without closing
HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
status = hdfs.getFileStatus(fileInSnapshot2);
// the size of snapshot file should be BLOCKSIZE*2-1
assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
// the size of the file should be (3 * BLOCKSIZE - 1)
status = hdfs.getFileStatus(file);
assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
assertFalse(blocks.isUnderConstruction());
assertTrue(blocks.isLastBlockComplete());
blockList = blocks.getLocatedBlocks();
// should be 2 blocks
assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
assertEquals(2, blockList.size());
// check the last block
lastBlock = blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
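    // A zero-length range at offset BLOCKSIZE should still resolve to the
    // single block containing that offset.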
blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
fileInSnapshot2.toString(), BLOCKSIZE, 0);
blockList = blocks.getLocatedBlocks();
assertEquals(1, blockList.size());
// check blocks for file being written
blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
file.toString(), 0, Long.MAX_VALUE);
blockList = blocks.getLocatedBlocks();
assertEquals(3, blockList.size());
assertTrue(blocks.isUnderConstruction());
assertFalse(blocks.isLastBlockComplete());
lastBlock = blocks.getLastLocatedBlock();
assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
out.close();
}
@Test
public void testLease() throws Exception {
try {
NameNodeAdapter.setLeasePeriod(fsn, 100, 200);
final Path foo = new Path(dir, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0);
HdfsDataOutputStream out = appendFileWithoutClosing(bar, 100);
out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
hdfs.delete(foo, true);
Thread.sleep(1000);
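      // Trigger the lease checks directly; the test passes as long as lease
      // recovery does not throw, even though the file being written was
      // deleted after the snapshot was taken.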
try {
fsn.writeLock();
NameNodeAdapter.getLeaseManager(fsn).runLeaseChecks();
} finally {
fsn.writeUnlock();
}
} finally {
NameNodeAdapter.setLeasePeriod(
fsn,
HdfsServerConstants.LEASE_SOFTLIMIT_PERIOD,
HdfsServerConstants.LEASE_HARDLIMIT_PERIOD);
}
}
}
| 11,880
| 37.202572
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.util.HashMap;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
 * Tests snapshot diff report generation.
 */
public class TestSnapshotDiffReport {
protected static final long seed = 0;
protected static final short REPLICATION = 3;
protected static final short REPLICATION_1 = 2;
protected static final long BLOCKSIZE = 1024;
public static final int SNAPSHOTNUMBER = 10;
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
protected Configuration conf;
protected MiniDFSCluster cluster;
protected DistributedFileSystem hdfs;
private final HashMap<Path, Integer> snapshotNumberMap = new HashMap<Path, Integer>();
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.format(true).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
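  /** Returns the next snapshot name ("s0", "s1", ...) for the given dir. */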
private String genSnapshotName(Path snapshotDir) {
int sNum = -1;
if (snapshotNumberMap.containsKey(snapshotDir)) {
sNum = snapshotNumberMap.get(snapshotDir);
}
snapshotNumberMap.put(snapshotDir, ++sNum);
return "s" + sNum;
}
  /**
   * Create/modify/delete files under a given directory, and also create
   * snapshots of the given snapshottable directories.
   */
private void modifyAndCreateSnapshot(Path modifyDir, Path[] snapshotDirs)
throws Exception {
Path file10 = new Path(modifyDir, "file10");
Path file11 = new Path(modifyDir, "file11");
Path file12 = new Path(modifyDir, "file12");
Path file13 = new Path(modifyDir, "file13");
Path link13 = new Path(modifyDir, "link13");
Path file14 = new Path(modifyDir, "file14");
Path file15 = new Path(modifyDir, "file15");
DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
// create link13
hdfs.createSymlink(file13, link13, false);
// create snapshot
for (Path snapshotDir : snapshotDirs) {
hdfs.allowSnapshot(snapshotDir);
hdfs.createSnapshot(snapshotDir, genSnapshotName(snapshotDir));
}
// delete file11
hdfs.delete(file11, true);
// modify file12
hdfs.setReplication(file12, REPLICATION);
// modify file13
hdfs.setReplication(file13, REPLICATION);
// delete link13
hdfs.delete(link13, false);
// create file14
DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
// create file15
DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, seed);
// create snapshot
for (Path snapshotDir : snapshotDirs) {
hdfs.createSnapshot(snapshotDir, genSnapshotName(snapshotDir));
}
// create file11 again
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, seed);
// delete file12
hdfs.delete(file12, true);
// modify file13
hdfs.setReplication(file13, (short) (REPLICATION - 2));
// create link13 again
hdfs.createSymlink(file13, link13, false);
// delete file14
hdfs.delete(file14, true);
// modify file15
hdfs.setReplication(file15, (short) (REPLICATION - 1));
// create snapshot
for (Path snapshotDir : snapshotDirs) {
hdfs.createSnapshot(snapshotDir, genSnapshotName(snapshotDir));
}
// modify file10
hdfs.setReplication(file10, (short) (REPLICATION + 1));
}
  /** Check the correctness of the diff reports. */
private void verifyDiffReport(Path dir, String from, String to,
DiffReportEntry... entries) throws IOException {
SnapshotDiffReport report = hdfs.getSnapshotDiffReport(dir, from, to);
// reverse the order of from and to
SnapshotDiffReport inverseReport = hdfs
.getSnapshotDiffReport(dir, to, from);
System.out.println(report.toString());
System.out.println(inverseReport.toString() + "\n");
assertEquals(entries.length, report.getDiffList().size());
assertEquals(entries.length, inverseReport.getDiffList().size());
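    // Every DELETE entry in the forward report should appear as a CREATE in
    // the inverse report (and vice versa), while MODIFY entries appear in
    // both.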
for (DiffReportEntry entry : entries) {
if (entry.getType() == DiffType.MODIFY) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(entry));
} else if (entry.getType() == DiffType.DELETE) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(
new DiffReportEntry(DiffType.CREATE, entry.getSourcePath())));
} else if (entry.getType() == DiffType.CREATE) {
assertTrue(report.getDiffList().contains(entry));
assertTrue(inverseReport.getDiffList().contains(
new DiffReportEntry(DiffType.DELETE, entry.getSourcePath())));
}
}
}
/** Test the computation and representation of diff between snapshots */
@Test (timeout=60000)
public void testDiffReport() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
Path subsub1 = new Path(sub1, "subsub1");
Path subsubsub1 = new Path(subsub1, "subsubsub1");
hdfs.mkdirs(subsubsub1);
modifyAndCreateSnapshot(sub1, new Path[]{sub1, subsubsub1});
modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1, subsubsub1});
try {
hdfs.getSnapshotDiffReport(subsub1, "s1", "s2");
fail("Expect exception when getting snapshot diff report: " + subsub1
+ " is not a snapshottable directory.");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + subsub1, e);
}
final String invalidName = "invalid";
try {
hdfs.getSnapshotDiffReport(sub1, invalidName, invalidName);
fail("Expect exception when providing invalid snapshot name for diff report");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Cannot find the snapshot of directory " + sub1 + " with name "
+ invalidName, e);
}
// diff between the same snapshot
SnapshotDiffReport report = hdfs.getSnapshotDiffReport(sub1, "s0", "s0");
System.out.println(report);
assertEquals(0, report.getDiffList().size());
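    // An empty snapshot name refers to the current state of the directory.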
report = hdfs.getSnapshotDiffReport(sub1, "", "");
System.out.println(report);
assertEquals(0, report.getDiffList().size());
report = hdfs.getSnapshotDiffReport(subsubsub1, "s0", "s2");
System.out.println(report);
assertEquals(0, report.getDiffList().size());
// test path with scheme also works
report = hdfs.getSnapshotDiffReport(hdfs.makeQualified(subsubsub1), "s0", "s2");
System.out.println(report);
assertEquals(0, report.getDiffList().size());
verifyDiffReport(sub1, "s0", "s2",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")));
verifyDiffReport(sub1, "s0", "s5",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file15")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file12")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("file11")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("file11")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file13")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("link13")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("link13")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
verifyDiffReport(sub1, "s2", "s5",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("file10")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file15")));
verifyDiffReport(sub1, "s3", "",
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1/file10")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
}
  /**
   * Make changes under a sub-directory, then delete the sub-directory. Make
   * sure the diff report computation correctly retrieves the diff from the
   * deleted sub-directory.
   */
@Test (timeout=60000)
public void testDiffReport2() throws Exception {
Path subsub1 = new Path(sub1, "subsub1");
Path subsubsub1 = new Path(subsub1, "subsubsub1");
hdfs.mkdirs(subsubsub1);
modifyAndCreateSnapshot(subsubsub1, new Path[]{sub1});
// delete subsub1
hdfs.delete(subsub1, true);
// check diff report between s0 and s2
verifyDiffReport(sub1, "s0", "s2",
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file15")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file12")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/file11")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("subsub1/subsubsub1/file13")),
new DiffReportEntry(DiffType.CREATE,
DFSUtil.string2Bytes("subsub1/subsubsub1/link13")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("subsub1/subsubsub1/link13")));
// check diff report between s0 and the current status
verifyDiffReport(sub1, "s0", "",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("subsub1")));
}
/**
* Rename a directory to its prior descendant, and verify the diff report.
*/
@Test
public void testDiffReportWithRename() throws Exception {
final Path root = new Path("/");
final Path sdir1 = new Path(root, "dir1");
final Path sdir2 = new Path(root, "dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
hdfs.mkdirs(bar);
hdfs.mkdirs(sdir2);
// create snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
// /dir1/foo/bar -> /dir2/bar
final Path bar2 = new Path(sdir2, "bar");
hdfs.rename(bar, bar2);
// /dir1/foo -> /dir2/bar/foo
final Path foo2 = new Path(bar2, "foo");
hdfs.rename(foo, foo2);
SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
// let's delete /dir2 to make things more complicated
hdfs.delete(sdir2, true);
verifyDiffReport(root, "s1", "s2",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1")),
new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("dir1/foo"),
DFSUtil.string2Bytes("dir2/bar/foo")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
new DiffReportEntry(DiffType.MODIFY,
DFSUtil.string2Bytes("dir1/foo/bar")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
new DiffReportEntry(DiffType.RENAME, DFSUtil
.string2Bytes("dir1/foo/bar"), DFSUtil.string2Bytes("dir2/bar")));
}
  /**
   * Renaming a file/dir to a location outside of the snapshottable dir should
   * be reported as a deletion. Renaming a file/dir in from outside should be
   * reported as a creation.
   */
@Test
public void testDiffReportWithRenameOutside() throws Exception {
final Path root = new Path("/");
final Path dir1 = new Path(root, "dir1");
final Path dir2 = new Path(root, "dir2");
final Path foo = new Path(dir1, "foo");
final Path fileInFoo = new Path(foo, "file");
final Path bar = new Path(dir2, "bar");
final Path fileInBar = new Path(bar, "file");
DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
// create snapshot on /dir1
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s0");
// move bar into dir1
final Path newBar = new Path(dir1, "newBar");
hdfs.rename(bar, newBar);
// move foo out of dir1 into dir2
final Path newFoo = new Path(dir2, "new");
hdfs.rename(foo, newFoo);
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
verifyDiffReport(dir1, "s0", "s1",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes(newBar
.getName())),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes(foo.getName())));
}
  /**
   * Renaming a file/dir and then deleting the ancestor dir of the rename
   * target should be reported as a deletion.
   */
@Test
public void testDiffReportWithRenameAndDelete() throws Exception {
final Path root = new Path("/");
final Path dir1 = new Path(root, "dir1");
final Path dir2 = new Path(root, "dir2");
final Path foo = new Path(dir1, "foo");
final Path fileInFoo = new Path(foo, "file");
final Path bar = new Path(dir2, "bar");
final Path fileInBar = new Path(bar, "file");
DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
hdfs.rename(fileInFoo, fileInBar, Rename.OVERWRITE);
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
verifyDiffReport(root, "s0", "s1",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2/bar")),
new DiffReportEntry(DiffType.DELETE, DFSUtil
.string2Bytes("dir2/bar/file")),
new DiffReportEntry(DiffType.RENAME,
DFSUtil.string2Bytes("dir1/foo/file"),
DFSUtil.string2Bytes("dir2/bar/file")));
// delete bar
hdfs.delete(bar, true);
SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
verifyDiffReport(root, "s0", "s2",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir1/foo")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("dir2")),
new DiffReportEntry(DiffType.DELETE, DFSUtil.string2Bytes("dir2/bar")),
new DiffReportEntry(DiffType.DELETE,
DFSUtil.string2Bytes("dir1/foo/file")));
}
@Test
public void testDiffReportWithRenameToNewDir() throws Exception {
final Path root = new Path("/");
final Path foo = new Path(root, "foo");
final Path fileInFoo = new Path(foo, "file");
DFSTestUtil.createFile(hdfs, fileInFoo, BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
final Path bar = new Path(root, "bar");
hdfs.mkdirs(bar);
final Path fileInBar = new Path(bar, "file");
hdfs.rename(fileInFoo, fileInBar);
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
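// bar did not exist when s0 was taken, so it is reported as a creation,
// while the moved file is still tracked as a rename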
verifyDiffReport(root, "s0", "s1",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
new DiffReportEntry(DiffType.CREATE, DFSUtil.string2Bytes("bar")),
new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo/file"),
DFSUtil.string2Bytes("bar/file")));
}
/**
* Rename a file and then append some data to it.
*/
@Test
public void testDiffReportWithRenameAndAppend() throws Exception {
final Path root = new Path("/");
final Path foo = new Path(root, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
final Path bar = new Path(root, "bar");
hdfs.rename(foo, bar);
DFSTestUtil.appendFile(hdfs, bar, 10); // append 10 bytes
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
// the modification entry for the file is always reported before its rename
verifyDiffReport(root, "s0", "s1",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo")),
new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo"),
DFSUtil.string2Bytes("bar")));
}
/**
* Test a nested renamed dir/file where the withNameList in the WithCount
* node of the parent directory becomes empty due to snapshot deletion. See
* HDFS-6996 for details.
*/
@Test
public void testDiffReportWithRenameAndSnapshotDeletion() throws Exception {
final Path root = new Path("/");
final Path foo = new Path(root, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
// rename /foo to /foo2
final Path foo2 = new Path(root, "foo2");
hdfs.rename(foo, foo2);
// now /foo/bar becomes /foo2/bar
final Path bar2 = new Path(foo2, "bar");
// delete snapshot s0 so that the withNameList inside of the WithCount node
// of foo becomes empty
hdfs.deleteSnapshot(root, "s0");
// create snapshot s1 and rename bar again
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
final Path bar3 = new Path(foo2, "bar-new");
hdfs.rename(bar2, bar3);
// compare snapshot s1 with the current state; foo2 is reported as modified
// because its child bar was renamed
verifyDiffReport(root, "s1", "",
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("")),
new DiffReportEntry(DiffType.MODIFY, DFSUtil.string2Bytes("foo2")),
new DiffReportEntry(DiffType.RENAME, DFSUtil.string2Bytes("foo2/bar"),
DFSUtil.string2Bytes("foo2/bar-new")));
}
}
| 22,294
| 40.986817
| 88
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestFileContextSnapshot {
private static final short REPLICATION = 3;
private static final int BLOCKSIZE = 1024;
private static final long SEED = 0;
private Configuration conf;
private MiniDFSCluster cluster;
private FileContext fileContext;
private DistributedFileSystem dfs;
private final String snapshotRoot = "/snapshot";
private final Path filePath = new Path(snapshotRoot, "file1");
private Path snapRootPath;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fileContext = FileContext.getFileContext(conf);
dfs = (DistributedFileSystem) cluster.getFileSystem();
snapRootPath = new Path(snapshotRoot);
dfs.mkdirs(snapRootPath);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout = 60000)
public void testCreateAndDeleteSnapshot() throws Exception {
DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
// disallow snapshot on dir
dfs.disallowSnapshot(snapRootPath);
try {
fileContext.createSnapshot(snapRootPath, "s1");
fail("createSnapshot should fail since snapshots are disallowed on "
+ snapRootPath);
} catch (SnapshotException e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + snapRootPath, e);
}
// allow snapshot on dir
dfs.allowSnapshot(snapRootPath);
Path ssPath = fileContext.createSnapshot(snapRootPath, "s1");
assertTrue("Failed to create snapshot", dfs.exists(ssPath));
fileContext.deleteSnapshot(snapRootPath, "s1");
assertFalse("Failed to delete snapshot", dfs.exists(ssPath));
}
/**
* Test FileStatus of snapshot file before/after rename
*/
@Test(timeout = 60000)
public void testRenameSnapshot() throws Exception {
DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
dfs.allowSnapshot(snapRootPath);
// Create snapshot s1 on the snapshottable root
Path snapPath1 = fileContext.createSnapshot(snapRootPath, "s1");
Path ssPath = new Path(snapPath1, filePath.getName());
assertTrue("Failed to create snapshot", dfs.exists(ssPath));
FileStatus statusBeforeRename = dfs.getFileStatus(ssPath);
// Rename the snapshot
fileContext.renameSnapshot(snapRootPath, "s1", "s2");
// <snapshotRoot>/.snapshot/s1/file1 should no longer exist
assertFalse("Old snapshot still exists after rename!", dfs.exists(ssPath));
Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapRootPath, "s2");
ssPath = new Path(snapshotRoot, filePath.getName());
// Instead, <snapshotRoot>/.snapshot/s2/file1 should exist
assertTrue("Snapshot doesn't exist!", dfs.exists(ssPath));
FileStatus statusAfterRename = dfs.getFileStatus(ssPath);
// FileStatus of the snapshot should not change except the path
assertFalse("Filestatus of the snapshot matches",
statusBeforeRename.equals(statusAfterRename));
statusBeforeRename.setPath(statusAfterRename.getPath());
assertEquals("FileStatus of the snapshot mismatches!",
statusBeforeRename.toString(), statusAfterRename.toString());
}
}
| 4,792
| 37.653226
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotReplication.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* This class tests the replication handling/calculation of snapshots. In
* particular, {@link INodeFile#getFileReplication()} and
* {@link INodeFile#getPreferredBlockReplication()} are tested to make sure
* the replication number is calculated correctly with/without snapshots.
*/
public class TestSnapshotReplication {
private static final long seed = 0;
private static final short REPLICATION = 3;
private static final int NUMDATANODE = 5;
private static final long BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
private final Path file1 = new Path(sub1, "file1");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
FSDirectory fsdir;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(NUMDATANODE)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
fsdir = fsn.getFSDirectory();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Check the replication of a given file. We test both
* {@link INodeFile#getFileReplication()} and
* {@link INodeFile#getPreferredBlockReplication()}.
*
* @param file The given file
* @param replication The expected replication number
* @param blockReplication The expected replication number for the block
* @throws Exception
*/
private void checkFileReplication(Path file, short replication,
short blockReplication) throws Exception {
// Get the FileStatus of the given file and check its replication number.
// Note that the replication number in FileStatus is derived from
// INodeFile#getFileReplication().
short fileReplication = hdfs.getFileStatus(file).getReplication();
assertEquals(replication, fileReplication);
// Check the correctness of getPreferredBlockReplication()
INode inode = fsdir.getINode(file.toString());
assertTrue(inode instanceof INodeFile);
assertEquals(blockReplication,
((INodeFile) inode).getPreferredBlockReplication());
}
/**
* Test replication number calculation for a normal file without snapshots.
*/
@Test (timeout=60000)
public void testReplicationWithoutSnapshot() throws Exception {
// Create file1, set its replication to REPLICATION
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Check the replication of file1
checkFileReplication(file1, REPLICATION, REPLICATION);
// Change the replication factor of file1 from 3 to 2
hdfs.setReplication(file1, (short) (REPLICATION - 1));
// Check the replication again
checkFileReplication(file1, (short) (REPLICATION - 1),
(short) (REPLICATION - 1));
}
INodeFile getINodeFile(Path p) throws Exception {
final String s = p.toString();
return INodeFile.valueOf(fsdir.getINode(s), s);
}
/**
* Check the replication for both the current file and all its prior snapshots
*
* @param currentFile
* the Path of the current file
* @param snapshotRepMap
* A map maintaining all the snapshots of the current file, as well
* as their expected replication number stored in their corresponding
* INodes
* @param expectedBlockRep
* The expected replication number
* @throws Exception
*/
private void checkSnapshotFileReplication(Path currentFile,
Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
// First check the getPreferredBlockReplication for the INode of
// the currentFile
final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
assertEquals(expectedBlockRep,
inodeOfCurrentFile.getPreferredBlockReplication());
// Then check replication for every snapshot
for (Path ss : snapshotRepMap.keySet()) {
final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), true);
final INodeFile ssInode = iip.getLastINode().asFile();
// The replication number derived from the
// INodeFileWithLink#getPreferredBlockReplication should
// always == expectedBlockRep
assertEquals(expectedBlockRep, ssInode.getPreferredBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication(iip.getPathSnapshotId()));
}
}
/**
* Test replication number calculation for a file with snapshots.
*/
@Test (timeout=60000)
public void testReplicationWithSnapshot() throws Exception {
short fileRep = 1;
// Create file1, set its replication to 1
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, fileRep, seed);
Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
// Change the replication factor from 1 to 5, taking a snapshot of sub1
// before each increase
while (fileRep < NUMDATANODE) {
// Create snapshot for sub1
Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s"
+ fileRep);
Path snapshot = new Path(snapshotRoot, file1.getName());
// Check the replication stored in the INode of the snapshot of file1
assertEquals(fileRep, getINodeFile(snapshot).getFileReplication());
snapshotRepMap.put(snapshot, fileRep);
// Increase the replication factor by 1
hdfs.setReplication(file1, ++fileRep);
// Check the replication for file1
checkFileReplication(file1, fileRep, fileRep);
// Also check the replication for all the prior snapshots of file1
checkSnapshotFileReplication(file1, snapshotRepMap, fileRep);
}
// Change replication factor back to 3.
hdfs.setReplication(file1, REPLICATION);
// Check the replication for file1
// Currently the max replication among snapshots should be 4
checkFileReplication(file1, REPLICATION, (short) (NUMDATANODE - 1));
// Also check the replication for all the prior snapshots of file1.
// Currently the max replication among snapshots should be 4
checkSnapshotFileReplication(file1, snapshotRepMap,
(short) (NUMDATANODE - 1));
}
/**
* Test replication for a file with snapshots, also including the scenario
* where the original file is deleted
*/
@Test (timeout=60000)
public void testReplicationAfterDeletion() throws Exception {
// Create file1, set its replication to 3
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
// Take 3 snapshots of sub1
for (int i = 1; i <= 3; i++) {
Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
Path ssFile = new Path(root, file1.getName());
snapshotRepMap.put(ssFile, REPLICATION);
}
// Check replication
checkFileReplication(file1, REPLICATION, REPLICATION);
checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
// Delete file1
hdfs.delete(file1, true);
// Check replication of snapshots
for (Path ss : snapshotRepMap.keySet()) {
final INodeFile ssInode = getINodeFile(ss);
// The replication number derived from the
// INodeFileWithLink#getPreferredBlockReplication should
// always == expectedBlockRep
assertEquals(REPLICATION, ssInode.getPreferredBlockReplication());
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(),
ssInode.getFileReplication());
}
}
}
| 9,342
| 38.757447
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotStatsMXBean.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.lang.management.ManagementFactory;
import java.lang.reflect.Array;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import javax.management.openmbean.CompositeData;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;
public class TestSnapshotStatsMXBean {
/**
* Test getting SnapshotStatsMXBean information
*/
@Test
public void testSnapshotStatsMXBeanInfo() throws Exception {
Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
String pathName = "/snapshot";
Path path = new Path(pathName);
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
SnapshotManager sm = cluster.getNamesystem().getSnapshotManager();
DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
dfs.mkdirs(path);
dfs.allowSnapshot(path);
dfs.createSnapshot(path);
MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=SnapshotInfo");
CompositeData[] directories =
(CompositeData[]) mbs.getAttribute(
mxbeanName, "SnapshottableDirectories");
int numDirectories = Array.getLength(directories);
assertEquals(sm.getNumSnapshottableDirs(), numDirectories);
CompositeData[] snapshots =
(CompositeData[]) mbs.getAttribute(mxbeanName, "Snapshots");
int numSnapshots = Array.getLength(snapshots);
assertEquals(sm.getNumSnapshots(), numSnapshots);
CompositeData d = (CompositeData) Array.get(directories, 0);
CompositeData s = (CompositeData) Array.get(snapshots, 0);
assertTrue(((String) d.get("path")).contains(pathName));
assertTrue(((String) s.get("snapshotDirectory")).contains(pathName));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}
| 3,036
| 35.590361
| 82
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestSnapshottableDirListing {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path root = new Path("/");
private final Path dir1 = new Path("/TestSnapshot1");
private final Path dir2 = new Path("/TestSnapshot2");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test listing all the snapshottable directories
*/
@Test (timeout=60000)
public void testListSnapshottableDir() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
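// nested snapshottable directories are disallowed by default; enable them
// here so that sub-dirs of a snapshottable dir can also be made
// snapshottable below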
// Initially there is no snapshottable directories in the system
SnapshottableDirectoryStatus[] dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
// Make root as snapshottable
final Path root = new Path("/");
hdfs.allowSnapshot(root);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals("", dirs[0].getDirStatus().getLocalName());
assertEquals(root, dirs[0].getFullPath());
// Make root non-snapshottable
hdfs.disallowSnapshot(root);
dirs = hdfs.getSnapshottableDirListing();
assertNull(dirs);
// Make dir1 as snapshottable
hdfs.allowSnapshot(dir1);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir1, dirs[0].getFullPath());
// There is no snapshot for dir1 yet
assertEquals(0, dirs[0].getSnapshotNumber());
// Make dir2 as snapshottable
hdfs.allowSnapshot(dir2);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(2, dirs.length);
assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2.getName(), dirs[1].getDirStatus().getLocalName());
assertEquals(dir2, dirs[1].getFullPath());
// There is no snapshot for dir2 yet
assertEquals(0, dirs[1].getSnapshotNumber());
// Create dir3
final Path dir3 = new Path("/TestSnapshot3");
hdfs.mkdirs(dir3);
// Rename dir3 to dir2
hdfs.rename(dir3, dir2, Rename.OVERWRITE);
// Now we only have one snapshottable dir: dir1
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
// Make dir2 snapshottable again
hdfs.allowSnapshot(dir2);
// Create a snapshot for dir2
hdfs.createSnapshot(dir2, "s1");
hdfs.createSnapshot(dir2, "s2");
dirs = hdfs.getSnapshottableDirListing();
// There are now 2 snapshots for dir2
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(2, dirs[1].getSnapshotNumber());
// Create sub-dirs under dir1
Path sub1 = new Path(dir1, "sub1");
Path file1 = new Path(sub1, "file1");
Path sub2 = new Path(dir1, "sub2");
Path file2 = new Path(sub2, "file2");
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
// Make sub1 and sub2 snapshottable
hdfs.allowSnapshot(sub1);
hdfs.allowSnapshot(sub2);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(4, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(sub1, dirs[2].getFullPath());
assertEquals(sub2, dirs[3].getFullPath());
// reset sub1
hdfs.disallowSnapshot(sub1);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(3, dirs.length);
assertEquals(dir1, dirs[0].getFullPath());
assertEquals(dir2, dirs[1].getFullPath());
assertEquals(sub2, dirs[2].getFullPath());
// Remove dir1, both dir1 and sub2 will be removed
hdfs.delete(dir1, true);
dirs = hdfs.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(dir2.getName(), dirs[0].getDirStatus().getLocalName());
assertEquals(dir2, dirs[0].getFullPath());
}
/**
* Test the listing with different user names to make sure only directories
* that are owned by the user are listed.
*/
@Test (timeout=60000)
public void testListWithDifferentUser() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
// first make dir1 and dir2 snapshottable
hdfs.allowSnapshot(dir1);
hdfs.allowSnapshot(dir2);
hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
// create two dirs and make them snapshottable under the name of user1
UserGroupInformation ugi1 = UserGroupInformation.createUserForTesting(
"user1", new String[] { "group1" });
DistributedFileSystem fs1 = (DistributedFileSystem) DFSTestUtil
.getFileSystemAs(ugi1, conf);
Path dir1_user1 = new Path("/dir1_user1");
Path dir2_user1 = new Path("/dir2_user1");
fs1.mkdirs(dir1_user1);
fs1.mkdirs(dir2_user1);
hdfs.allowSnapshot(dir1_user1);
hdfs.allowSnapshot(dir2_user1);
// user2
UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
"user2", new String[] { "group2" });
DistributedFileSystem fs2 = (DistributedFileSystem) DFSTestUtil
.getFileSystemAs(ugi2, conf);
Path dir_user2 = new Path("/dir_user2");
Path subdir_user2 = new Path(dir_user2, "subdir");
fs2.mkdirs(dir_user2);
fs2.mkdirs(subdir_user2);
hdfs.allowSnapshot(dir_user2);
hdfs.allowSnapshot(subdir_user2);
// super user
String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
UserGroupInformation superUgi = UserGroupInformation.createUserForTesting(
"superuser", new String[] { supergroup });
DistributedFileSystem fs3 = (DistributedFileSystem) DFSTestUtil
.getFileSystemAs(superUgi, conf);
// list the snapshottable dirs for superuser
SnapshottableDirectoryStatus[] dirs = fs3.getSnapshottableDirListing();
// 6 snapshottable dirs: dir1, dir2, dir1_user1, dir2_user1, dir_user2, and
// subdir_user2
assertEquals(6, dirs.length);
// list the snapshottable dirs for user1
dirs = fs1.getSnapshottableDirListing();
// 2 dirs owned by user1: dir1_user1 and dir2_user1
assertEquals(2, dirs.length);
assertEquals(dir1_user1, dirs[0].getFullPath());
assertEquals(dir2_user1, dirs[1].getFullPath());
// list the snapshottable dirs for user2
dirs = fs2.getSnapshottableDirListing();
// 2 dirs owned by user2: dir_user2 and subdir_user2
assertEquals(2, dirs.length);
assertEquals(dir_user2, dirs[0].getFullPath());
assertEquals(subdir_user2, dirs[1].getFullPath());
}
}
| 8,943
| 36.898305
| 90
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
/**
* Test the snapshot-related metrics
*/
public class TestSnapshotMetrics {
private static final long seed = 0;
private static final short REPLICATION = 3;
private static final String NN_METRICS = "NameNodeActivity";
private static final String NS_METRICS = "FSNamesystem";
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
private final Path file1 = new Path(sub1, "file1");
private final Path file2 = new Path(sub1, "file2");
private Configuration conf;
private MiniDFSCluster cluster;
private DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(REPLICATION)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
DFSTestUtil.createFile(hdfs, file1, 1024, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 1024, REPLICATION, seed);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test the metrics SnapshottableDirectories, AllowSnapshotOps,
* DisallowSnapshotOps, and ListSnapshottableDirOps.
*/
@Test
public void testSnapshottableDirs() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
assertGauge("SnapshottableDirectories", 0, getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps", 0L, getMetrics(NN_METRICS));
assertCounter("DisallowSnapshotOps", 0L, getMetrics(NN_METRICS));
// Allow snapshots for directories, and check the metrics
hdfs.allowSnapshot(sub1);
assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps", 1L, getMetrics(NN_METRICS));
Path sub2 = new Path(dir, "sub2");
Path file = new Path(sub2, "file");
DFSTestUtil.createFile(hdfs, file, 1024, REPLICATION, seed);
hdfs.allowSnapshot(sub2);
assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps", 2L, getMetrics(NN_METRICS));
Path subsub1 = new Path(sub1, "sub1sub1");
Path subfile = new Path(subsub1, "file");
DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
hdfs.allowSnapshot(subsub1);
assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
assertCounter("AllowSnapshotOps", 3L, getMetrics(NN_METRICS));
// Allowing snapshots on an already snapshottable directory should not
// change the SnapshottableDirectories gauge
hdfs.allowSnapshot(sub1);
assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
// But the number of allowSnapshot operations still increases
assertCounter("AllowSnapshotOps", 4L, getMetrics(NN_METRICS));
// Disallow the snapshot for snapshottable directories, then check the
// metrics again
hdfs.disallowSnapshot(sub1);
assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
assertCounter("DisallowSnapshotOps", 1L, getMetrics(NN_METRICS));
// delete subsub1, snapshottable directories should be 1
hdfs.delete(subsub1, true);
assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
// list all the snapshottable directories
SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
assertEquals(1, status.length);
assertCounter("ListSnapshottableDirOps", 1L, getMetrics(NN_METRICS));
}
/**
* Test the metrics Snapshots, CreateSnapshotOps, DeleteSnapshotOps,
* and RenameSnapshotOps.
*/
@Test
public void testSnapshots() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
assertGauge("Snapshots", 0, getMetrics(NS_METRICS));
assertCounter("CreateSnapshotOps", 0L, getMetrics(NN_METRICS));
// Attempting to create a snapshot for a non-snapshottable directory should
// not change the Snapshots gauge, though the op counter still increments
try {
hdfs.createSnapshot(sub1, "s1");
} catch (Exception e) {
// expected: sub1 is not snapshottable yet
}
assertGauge("Snapshots", 0, getMetrics(NS_METRICS));
assertCounter("CreateSnapshotOps", 1L, getMetrics(NN_METRICS));
// Create snapshot for sub1
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1, "s1");
assertGauge("Snapshots", 1, getMetrics(NS_METRICS));
assertCounter("CreateSnapshotOps", 2L, getMetrics(NN_METRICS));
hdfs.createSnapshot(sub1, "s2");
assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
assertCounter("CreateSnapshotOps", 3L, getMetrics(NN_METRICS));
hdfs.getSnapshotDiffReport(sub1, "s1", "s2");
assertCounter("SnapshotDiffReportOps", 1L, getMetrics(NN_METRICS));
// Create snapshot for a directory under sub1
Path subsub1 = new Path(sub1, "sub1sub1");
Path subfile = new Path(subsub1, "file");
DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
hdfs.allowSnapshot(subsub1);
hdfs.createSnapshot(subsub1, "s11");
assertGauge("Snapshots", 3, getMetrics(NS_METRICS));
assertCounter("CreateSnapshotOps", 4L, getMetrics(NN_METRICS));
// delete snapshot
hdfs.deleteSnapshot(sub1, "s2");
assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
assertCounter("DeleteSnapshotOps", 1L, getMetrics(NN_METRICS));
// rename snapshot
hdfs.renameSnapshot(sub1, "s1", "NewS1");
assertGauge("Snapshots", 2, getMetrics(NS_METRICS));
assertCounter("RenameSnapshotOps", 1L, getMetrics(NN_METRICS));
}
}
| 6,997
| 38.314607
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestOpenFilesWithSnapshot {
private final Configuration conf = new Configuration();
MiniDFSCluster cluster = null;
DistributedFileSystem fs = null;
@Before
public void setup() throws IOException {
// Set the block size before building the cluster; setting it afterwards
// has no effect on the already-started NameNode and DataNodes.
conf.set("dfs.blocksize", "1048576");
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
fs = cluster.getFileSystem();
}
@After
public void teardown() throws IOException {
if (fs != null) {
fs.close();
}
if (cluster != null) {
cluster.shutdown();
}
}
@Test
public void testUCFileDeleteWithSnapShot() throws Exception {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
// delete files separately
fs.delete(new Path("/test/test/test2"), true);
fs.delete(new Path("/test/test/test3"), true);
cluster.restartNameNode();
}
@Test
public void testParentDirWithUCFileDeleteWithSnapShot() throws Exception {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
// delete parent directory
fs.delete(new Path("/test/test"), true);
cluster.restartNameNode();
}
@Test
public void testWithCheckpoint() throws Exception {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
fs.delete(new Path("/test/test"), true);
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
cluster.restartNameNode(true);
// read snapshot file after restart
String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
"s1/test/test2");
DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
"s1/test/test3");
DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
@Test
public void testFilesDeletionWithCheckpoint() throws Exception {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
fs.delete(new Path("/test/test/test2"), true);
fs.delete(new Path("/test/test/test3"), true);
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
cluster.restartNameNode(true);
// read snapshot file after restart
String test2snapshotPath = Snapshot.getSnapshotPath(path.toString(),
"s1/test/test2");
DFSTestUtil.readFile(fs, new Path(test2snapshotPath));
String test3snapshotPath = Snapshot.getSnapshotPath(path.toString(),
"s1/test/test3");
DFSTestUtil.readFile(fs, new Path(test3snapshotPath));
}
private void doWriteAndAbort(DistributedFileSystem fs, Path path)
throws IOException {
fs.mkdirs(path);
fs.allowSnapshot(path);
DFSTestUtil
.createFile(fs, new Path("/test/test1"), 100, (short) 2, 100024L);
DFSTestUtil
.createFile(fs, new Path("/test/test2"), 100, (short) 2, 100024L);
Path file = new Path("/test/test/test2");
FSDataOutputStream out = fs.create(file);
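// write two 1MB chunks ("hell" is 4 bytes per write) so the file spans
// multiple blocks at the configured 1MB block size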
for (int i = 0; i < 2; i++) {
long count = 0;
while (count < 1048576) {
out.writeBytes("hell");
count += 4;
}
}
((DFSOutputStream) out.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.UPDATE_LENGTH));
DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream());
Path file2 = new Path("/test/test/test3");
FSDataOutputStream out2 = fs.create(file2);
for (int i = 0; i < 2; i++) {
long count = 0;
while (count < 1048576) {
out2.writeBytes("hell");
count += 4;
}
}
((DFSOutputStream) out2.getWrappedStream()).hsync(EnumSet
.of(SyncFlag.UPDATE_LENGTH));
DFSTestUtil.abortStream((DFSOutputStream) out2.getWrappedStream());
fs.createSnapshot(path, "s1");
}
@Test
public void testOpenFilesWithMultipleSnapshots() throws Exception {
doTestMultipleSnapshots(true);
}
@Test
public void testOpenFilesWithMultipleSnapshotsWithoutCheckpoint()
throws Exception {
doTestMultipleSnapshots(false);
}
private void doTestMultipleSnapshots(boolean saveNamespace)
throws IOException {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
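// take a second snapshot, then delete the live directory and s2 itself,
// forcing cleanup of the aborted files' blocks while s1 still exists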
fs.createSnapshot(path, "s2");
fs.delete(new Path("/test/test"), true);
fs.deleteSnapshot(path, "s2");
cluster.triggerBlockReports();
if (saveNamespace) {
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
}
cluster.restartNameNode(true);
}
@Test
public void testOpenFilesWithRename() throws Exception {
Path path = new Path("/test");
doWriteAndAbort(fs, path);
// check for zero sized blocks
Path fileWithEmptyBlock = new Path("/test/test/test4");
fs.create(fileWithEmptyBlock);
NamenodeProtocols nameNodeRpc = cluster.getNameNodeRpc();
String clientName = fs.getClient().getClientName();
// create one empty block
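// (the block is allocated via addBlock but never written, so it stays
// zero-sized)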
nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
HdfsConstants.GRANDFATHER_INODE_ID, null);
fs.createSnapshot(path, "s2");
fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));
fs.delete(new Path("/test/test-renamed"), true);
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
cluster.restartNameNode(true);
}
}
| 7,294
| 33.738095
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;
import java.io.File;
import java.io.IOException;
import java.util.EnumSet;
import java.util.List;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.INodeReference;
import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.ChildrenDiff;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import org.mockito.internal.util.reflection.Whitebox;
/** Testing rename with snapshots. */
public class TestRenameWithSnapshots {
static {
SnapshotTestHelper.disableLogs();
}
private static final Log LOG = LogFactory.getLog(TestRenameWithSnapshots.class);
private static final long SEED = 0;
private static final short REPL = 3;
private static final short REPL_1 = 2;
private static final short REPL_2 = 1;
private static final long BLOCKSIZE = 1024;
private static final Configuration conf = new Configuration();
private static MiniDFSCluster cluster;
private static FSNamesystem fsn;
private static FSDirectory fsdir;
private static DistributedFileSystem hdfs;
private static final String testDir =
System.getProperty("test.build.data", "build/test/data");
static private final Path dir = new Path("/testRenameWithSnapshots");
static private final Path sub1 = new Path(dir, "sub1");
static private final Path file1 = new Path(sub1, "file1");
static private final Path file2 = new Path(sub1, "file2");
static private final Path file3 = new Path(sub1, "file3");
static private final String snap1 = "snap1";
static private final String snap2 = "snap2";
@Before
public void setUp() throws Exception {
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL).format(true)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test (timeout=300000)
public void testRenameFromSDir2NonSDir() throws Exception {
final String dirStr = "/testRenameWithSnapshot";
final String abcStr = dirStr + "/abc";
final Path abc = new Path(abcStr);
hdfs.mkdirs(abc, new FsPermission((short)0777));
hdfs.allowSnapshot(abc);
final Path foo = new Path(abc, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
hdfs.createSnapshot(abc, "s0");
try {
hdfs.rename(abc, new Path(dirStr, "tmp"));
fail("Expect exception since " + abc
+ " is snapshottable and already has snapshots");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(abcStr
+ " is snapshottable and already has snapshots", e);
}
final String xyzStr = dirStr + "/xyz";
final Path xyz = new Path(xyzStr);
hdfs.mkdirs(xyz, new FsPermission((short)0777));
final Path bar = new Path(xyz, "bar");
hdfs.rename(foo, bar);
final INode fooRef = fsdir.getINode(
SnapshotTestHelper.getSnapshotPath(abc, "s0", "foo").toString());
Assert.assertTrue(fooRef.isReference());
Assert.assertTrue(fooRef.asReference() instanceof INodeReference.WithName);
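// the WithCount node is shared by the snapshot copy (a WithName reference)
// and the live rename target, so its reference count is 2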
final INodeReference.WithCount withCount
= (INodeReference.WithCount)fooRef.asReference().getReferredINode();
Assert.assertEquals(2, withCount.getReferenceCount());
final INode barRef = fsdir.getINode(bar.toString());
Assert.assertTrue(barRef.isReference());
Assert.assertSame(withCount, barRef.asReference().getReferredINode());
hdfs.delete(bar, false);
Assert.assertEquals(1, withCount.getReferenceCount());
}
private static boolean existsInDiffReport(List<DiffReportEntry> entries,
DiffType type, String sourcePath, String targetPath) {
for (DiffReportEntry entry : entries) {
if (entry.equals(new DiffReportEntry(type, DFSUtil
.string2Bytes(sourcePath), targetPath == null ? null : DFSUtil
.string2Bytes(targetPath)))) {
return true;
}
}
return false;
}
/**
* Rename a file under a snapshottable directory, file does not exist
* in a snapshot.
*/
@Test (timeout=60000)
public void testRenameFileNotInSnapshot() throws Exception {
hdfs.mkdirs(sub1);
hdfs.allowSnapshot(sub1);
hdfs.createSnapshot(sub1, snap1);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
hdfs.rename(file1, file2);
// Query the diff report and make sure it looks as expected.
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
List<DiffReportEntry> entries = diffReport.getDiffList();
assertEquals(2, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.CREATE, file2.getName(),
null));
}
/**
* Rename a file under a snapshottable directory, file exists
* in a snapshot.
*/
@Test
public void testRenameFileInSnapshot() throws Exception {
hdfs.mkdirs(sub1);
hdfs.allowSnapshot(sub1);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
hdfs.createSnapshot(sub1, snap1);
hdfs.rename(file1, file2);
// Query the diff report and make sure it looks as expected.
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
LOG.info("DiffList is " + diffReport.toString());
List<DiffReportEntry> entries = diffReport.getDiffList();
assertEquals(2, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
file2.getName()));
}
@Test (timeout=60000)
public void testRenameTwiceInSnapshot() throws Exception {
hdfs.mkdirs(sub1);
hdfs.allowSnapshot(sub1);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
hdfs.createSnapshot(sub1, snap1);
hdfs.rename(file1, file2);
hdfs.createSnapshot(sub1, snap2);
hdfs.rename(file2, file3);
SnapshotDiffReport diffReport;
// Query the diff report and make sure it looks as expected.
diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, snap2);
LOG.info("DiffList is " + diffReport.toString());
List<DiffReportEntry> entries = diffReport.getDiffList();
assertEquals(2, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
file2.getName()));
diffReport = hdfs.getSnapshotDiffReport(sub1, snap2, "");
LOG.info("DiffList is " + diffReport.toString());
entries = diffReport.getDiffList();
assertEquals(2, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, file2.getName(),
file3.getName()));
diffReport = hdfs.getSnapshotDiffReport(sub1, snap1, "");
LOG.info("DiffList is " + diffReport.toString());
entries = diffReport.getDiffList();
assertEquals(2, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, file1.getName(),
file3.getName()));
}
@Test (timeout=60000)
public void testRenameFileInSubDirOfDirWithSnapshot() throws Exception {
final Path sub2 = new Path(sub1, "sub2");
final Path sub2file1 = new Path(sub2, "sub2file1");
final Path sub2file2 = new Path(sub2, "sub2file2");
final String sub1snap1 = "sub1snap1";
hdfs.mkdirs(sub1);
hdfs.mkdirs(sub2);
DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap1);
// Rename the file in the subdirectory.
hdfs.rename(sub2file1, sub2file2);
// Query the diff report and make sure it looks as expected.
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, sub1snap1,
"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List<DiffReportEntry> entries = diffReport.getDiffList();
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, sub2.getName(),
null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName()
+ "/" + sub2file1.getName(), sub2.getName() + "/" + sub2file2.getName()));
}
@Test (timeout=60000)
public void testRenameDirectoryInSnapshot() throws Exception {
final Path sub2 = new Path(sub1, "sub2");
final Path sub3 = new Path(sub1, "sub3");
final Path sub2file1 = new Path(sub2, "sub2file1");
final String sub1snap1 = "sub1snap1";
hdfs.mkdirs(sub1);
hdfs.mkdirs(sub2);
DFSTestUtil.createFile(hdfs, sub2file1, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sub1, sub1snap1);
// First rename the sub-directory.
hdfs.rename(sub2, sub3);
// Query the diff report and make sure it looks as expected.
SnapshotDiffReport diffReport = hdfs.getSnapshotDiffReport(sub1, sub1snap1,
"");
LOG.info("DiffList is \n\"" + diffReport.toString() + "\"");
List<DiffReportEntry> entries = diffReport.getDiffList();
assertEquals(2, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, sub2.getName(),
sub3.getName()));
}
/**
* After the following steps:
* <pre>
* 1. Take snapshot s1 on /dir1 at time t1.
* 2. Take snapshot s2 on /dir2 at time t2.
* 3. Modify the subtree of /dir2/foo/ to make it a dir with snapshots.
* 4. Take snapshot s3 on /dir1 at time t3.
* 5. Rename /dir2/foo/ to /dir1/foo/.
* </pre>
* When changes happen on foo, the diff should be recorded in snapshot s2.
*/
@Test (timeout=60000)
public void testRenameDirAcrossSnapshottableDirs() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir2, "foo");
final Path bar = new Path(foo, "bar");
final Path bar2 = new Path(foo, "bar2");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
hdfs.setReplication(bar2, REPL_1);
hdfs.delete(bar, true);
hdfs.createSnapshot(sdir1, "s3");
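// move foo from /dir2 into /dir1 after s3 was taken, so s3 must not
// contain foo (verified below)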
final Path newfoo = new Path(sdir1, "foo");
hdfs.rename(foo, newfoo);
// the snapshot copy of bar can still be accessed through
// /dir2/.snapshot/s2/foo/bar
final Path snapshotBar = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar");
assertTrue(hdfs.exists(snapshotBar));
// delete bar2
final Path newBar2 = new Path(newfoo, "bar2");
assertTrue(hdfs.exists(newBar2));
hdfs.delete(newBar2, true);
// /dir2/.snapshot/s2/foo/bar2 should still work
final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
FileStatus status = hdfs.getFileStatus(bar2_s2);
assertEquals(REPL, status.getReplication());
final Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
"foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
}
/**
* Rename a single file across snapshottable dirs.
*/
@Test (timeout=60000)
public void testRenameFileAcrossSnapshottableDirs() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir2, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
hdfs.createSnapshot(sdir1, "s3");
final Path newfoo = new Path(sdir1, "foo");
hdfs.rename(foo, newfoo);
// change the replication factor of foo
hdfs.setReplication(newfoo, REPL_1);
// /dir2/.snapshot/s2/foo should still work
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo");
assertTrue(hdfs.exists(foo_s2));
FileStatus status = hdfs.getFileStatus(foo_s2);
assertEquals(REPL, status.getReplication());
final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3",
"foo");
assertFalse(hdfs.exists(foo_s3));
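// although foo now lives under /dir1, its file diffs are still recorded in
// snapshot s2 of the source directory /dir2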
INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
INodeFile sfoo = fsdir.getINode(newfoo.toString()).asFile();
assertEquals(s2.getId(), sfoo.getDiffs().getLastSnapshotId());
}
/**
* Test renaming a dir and then delete snapshots.
*/
@Test
public void testRenameDirAndDeleteSnapshot_1() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir2, "foo");
final Path bar = new Path(foo, "bar");
final Path bar2 = new Path(foo, "bar2");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
hdfs.createSnapshot(sdir1, "s3");
final Path newfoo = new Path(sdir1, "foo");
hdfs.rename(foo, newfoo);
final Path newbar = new Path(newfoo, bar.getName());
final Path newbar2 = new Path(newfoo, bar2.getName());
final Path newbar3 = new Path(newfoo, "bar3");
DFSTestUtil.createFile(hdfs, newbar3, BLOCKSIZE, REPL, SEED);
hdfs.createSnapshot(sdir1, "s4");
hdfs.delete(newbar, true);
hdfs.delete(newbar3, true);
assertFalse(hdfs.exists(newbar3));
assertFalse(hdfs.exists(bar));
final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
"foo/bar");
final Path bar3_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
"foo/bar3");
assertTrue(hdfs.exists(bar_s4));
assertTrue(hdfs.exists(bar3_s4));
hdfs.createSnapshot(sdir1, "s5");
hdfs.delete(newbar2, true);
assertFalse(hdfs.exists(bar2));
final Path bar2_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5",
"foo/bar2");
assertTrue(hdfs.exists(bar2_s5));
    // delete snapshot s5. The diff of s5 should be combined into s4
hdfs.deleteSnapshot(sdir1, "s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(bar2_s5));
final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
"foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
    // delete snapshot s4. The diff of s4 should be combined into s2 instead
    // of s3.
hdfs.deleteSnapshot(sdir1, "s4");
assertFalse(hdfs.exists(bar_s4));
Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
assertFalse(hdfs.exists(bar_s3));
final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar");
assertTrue(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s4));
Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar2");
assertTrue(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar3_s4));
Path bar3_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
bar3_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar3");
assertFalse(hdfs.exists(bar3_s3));
final Path bar3_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar3");
assertFalse(hdfs.exists(bar3_s2));
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// delete snapshot s2.
hdfs.deleteSnapshot(sdir2, "s2");
assertFalse(hdfs.exists(bar_s2));
assertFalse(hdfs.exists(bar2_s2));
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s1");
restartClusterAndCheckImage(true);
}
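  /**
   * Restart the cluster twice and verify that the namespace is preserved:
   * first restart without saving the namespace, so the edit log must be
   * replayed; then save the namespace and restart again, so the new fsimage
   * is loaded. The namespace trees dumped at each stage must be identical.
   */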
private void restartClusterAndCheckImage(boolean compareQuota)
throws IOException {
File fsnBefore = new File(testDir, "dumptree_before");
File fsnMiddle = new File(testDir, "dumptree_middle");
File fsnAfter = new File(testDir, "dumptree_after");
SnapshotTestHelper.dumpTree2File(fsdir, fsnBefore);
cluster.shutdown(false, false);
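    // restart without saving the namespace, so the edit log must be replayed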
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPL).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
// later check fsnMiddle to see if the edit log is applied correctly
SnapshotTestHelper.dumpTree2File(fsdir, fsnMiddle);
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPL).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
// dump the namespace loaded from fsimage
SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter);
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle,
compareQuota);
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter,
compareQuota);
}
/**
   * Test renaming a file and then deleting snapshots.
*/
@Test
public void testRenameFileAndDeleteSnapshot() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir2, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
hdfs.createSnapshot(sdir1, "s3");
final Path newfoo = new Path(sdir1, "foo");
hdfs.rename(foo, newfoo);
hdfs.setReplication(newfoo, REPL_1);
hdfs.createSnapshot(sdir1, "s4");
hdfs.setReplication(newfoo, REPL_2);
FileStatus status = hdfs.getFileStatus(newfoo);
assertEquals(REPL_2, status.getReplication());
final Path foo_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo");
status = hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1, status.getReplication());
hdfs.createSnapshot(sdir1, "s5");
final Path foo_s5 = SnapshotTestHelper.getSnapshotPath(sdir1, "s5", "foo");
status = hdfs.getFileStatus(foo_s5);
assertEquals(REPL_2, status.getReplication());
// delete snapshot s5.
hdfs.deleteSnapshot(sdir1, "s5");
restartClusterAndCheckImage(true);
assertFalse(hdfs.exists(foo_s5));
status = hdfs.getFileStatus(foo_s4);
assertEquals(REPL_1, status.getReplication());
// delete snapshot s4.
hdfs.deleteSnapshot(sdir1, "s4");
assertFalse(hdfs.exists(foo_s4));
Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo");
assertFalse(hdfs.exists(foo_s3));
foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo");
assertFalse(hdfs.exists(foo_s3));
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
assertTrue(hdfs.exists(foo_s2));
status = hdfs.getFileStatus(foo_s2);
assertEquals(REPL, status.getReplication());
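    // internal check: after deleting s5 and s4, foo should keep a single
    // file diff, associated with snapshot s2 of the source tree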
INodeFile snode = fsdir.getINode(newfoo.toString()).asFile();
assertEquals(1, snode.getDiffs().asList().size());
INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(), snode.getDiffs().getLastSnapshotId());
// restart cluster
restartClusterAndCheckImage(true);
// delete snapshot s2.
hdfs.deleteSnapshot(sdir2, "s2");
assertFalse(hdfs.exists(foo_s2));
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s3");
restartClusterAndCheckImage(true);
hdfs.deleteSnapshot(sdir1, "s1");
restartClusterAndCheckImage(true);
}
/**
   * Test renaming a dir and a file multiple times across snapshottable
   * directories: /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
   *
   * Snapshots are created only in the beginning, before any rename.
*/
@Test
public void testRenameMoreThanOnceAcrossSnapDirs() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path sdir3 = new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1 = new Path(sdir1, "foo");
final Path bar1_dir1 = new Path(foo_dir1, "bar1");
final Path bar2_dir1 = new Path(sdir1, "bar");
DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
DFSTestUtil.createFile(hdfs, bar2_dir1, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");
// 1. /dir1/foo -> /dir2/foo, /dir1/bar -> /dir2/bar
final Path foo_dir2 = new Path(sdir2, "foo");
hdfs.rename(foo_dir1, foo_dir2);
final Path bar2_dir2 = new Path(sdir2, "bar");
hdfs.rename(bar2_dir1, bar2_dir2);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// modification on /dir2/foo and /dir2/bar
final Path bar1_dir2 = new Path(foo_dir2, "bar1");
hdfs.setReplication(bar1_dir2, REPL_1);
hdfs.setReplication(bar2_dir2, REPL_1);
// check
final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
"foo/bar1");
final Path bar2_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
"bar");
final Path bar1_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar1");
final Path bar2_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
assertEquals(REPL, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL_1, statusBar1.getReplication());
FileStatus statusBar2 = hdfs.getFileStatus(bar2_s1);
assertEquals(REPL, statusBar2.getReplication());
statusBar2 = hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL_1, statusBar2.getReplication());
// 2. /dir2/foo -> /dir3/foo, /dir2/bar -> /dir3/bar
final Path foo_dir3 = new Path(sdir3, "foo");
hdfs.rename(foo_dir2, foo_dir3);
final Path bar2_dir3 = new Path(sdir3, "bar");
hdfs.rename(bar2_dir2, bar2_dir3);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// modification on /dir3/foo and /dir3/bar
final Path bar1_dir3 = new Path(foo_dir3, "bar1");
hdfs.setReplication(bar1_dir3, REPL_2);
hdfs.setReplication(bar2_dir3, REPL_2);
// check
final Path bar1_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3",
"foo/bar1");
final Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir3, "s3",
"bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1 = hdfs.getFileStatus(bar1_s1);
assertEquals(REPL, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2, statusBar1.getReplication());
statusBar2 = hdfs.getFileStatus(bar2_s1);
assertEquals(REPL, statusBar2.getReplication());
statusBar2 = hdfs.getFileStatus(bar2_dir3);
assertEquals(REPL_2, statusBar2.getReplication());
// 3. /dir3/foo -> /dir2/foo, /dir3/bar -> /dir2/bar
hdfs.rename(foo_dir3, foo_dir2);
hdfs.rename(bar2_dir3, bar2_dir2);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// modification on /dir2/foo
hdfs.setReplication(bar1_dir2, REPL);
hdfs.setReplication(bar2_dir2, REPL);
// check
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
statusBar1 = hdfs.getFileStatus(bar1_s1);
assertEquals(REPL, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL, statusBar1.getReplication());
statusBar2 = hdfs.getFileStatus(bar2_s1);
assertEquals(REPL, statusBar2.getReplication());
statusBar2 = hdfs.getFileStatus(bar2_dir2);
assertEquals(REPL, statusBar2.getReplication());
// 4. /dir2/foo -> /dir1/foo, /dir2/bar -> /dir1/bar
hdfs.rename(foo_dir2, foo_dir1);
hdfs.rename(bar2_dir2, bar2_dir1);
// check the internal details
INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString())
.asReference();
INodeReference.WithCount fooWithCount = (WithCount) fooRef
.getReferredINode();
// only 2 references: one in deleted list of sdir1, one in created list of
// sdir1
assertEquals(2, fooWithCount.getReferenceCount());
INodeDirectory foo = fooWithCount.asDirectory();
assertEquals(1, foo.getDiffs().asList().size());
INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), foo.getDirectoryWithSnapshotFeature()
.getLastSnapshotId());
INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
assertEquals(1, bar1.getDiffs().asList().size());
assertEquals(s1.getId(), bar1.getDiffs().getLastSnapshotId());
INodeReference barRef = fsdir.getINode4Write(bar2_dir1.toString())
.asReference();
INodeReference.WithCount barWithCount = (WithCount) barRef
.getReferredINode();
assertEquals(2, barWithCount.getReferenceCount());
INodeFile bar = barWithCount.asFile();
assertEquals(1, bar.getDiffs().asList().size());
assertEquals(s1.getId(), bar.getDiffs().getLastSnapshotId());
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// delete foo
hdfs.delete(foo_dir1, true);
restartClusterAndCheckImage(true);
hdfs.delete(bar2_dir1, true);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// check
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar2_s1));
assertFalse(hdfs.exists(bar1_s2));
assertFalse(hdfs.exists(bar2_s2));
assertFalse(hdfs.exists(bar1_s3));
assertFalse(hdfs.exists(bar2_s3));
assertFalse(hdfs.exists(foo_dir1));
assertFalse(hdfs.exists(bar1_dir1));
assertFalse(hdfs.exists(bar2_dir1));
statusBar1 = hdfs.getFileStatus(bar1_s1);
assertEquals(REPL, statusBar1.getReplication());
statusBar2 = hdfs.getFileStatus(bar2_s1);
assertEquals(REPL, statusBar2.getReplication());
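    // after deleting the current foo and bar, only the WithName references
    // saved in snapshot s1 should remain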
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
fooRef = fsdir.getINode(foo_s1.toString()).asReference();
fooWithCount = (WithCount) fooRef.getReferredINode();
assertEquals(1, fooWithCount.getReferenceCount());
barRef = fsdir.getINode(bar2_s1.toString()).asReference();
barWithCount = (WithCount) barRef.getReferredINode();
assertEquals(1, barWithCount.getReferenceCount());
}
/**
   * Test renaming a dir multiple times across snapshottable directories:
   * /dir1/foo -> /dir2/foo -> /dir3/foo -> /dir2/foo -> /dir1/foo
   *
   * Snapshots are created after each rename.
*/
@Test
public void testRenameMoreThanOnceAcrossSnapDirs_2() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path sdir3 = new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo_dir1 = new Path(sdir1, "foo");
final Path bar1_dir1 = new Path(foo_dir1, "bar1");
final Path bar_dir1 = new Path(sdir1, "bar");
DFSTestUtil.createFile(hdfs, bar1_dir1, BLOCKSIZE, REPL, SEED);
DFSTestUtil.createFile(hdfs, bar_dir1, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s3");
// 1. /dir1/foo -> /dir2/foo, /dir1/bar -> /dir2/bar
final Path foo_dir2 = new Path(sdir2, "foo");
hdfs.rename(foo_dir1, foo_dir2);
final Path bar_dir2 = new Path(sdir2, "bar");
hdfs.rename(bar_dir1, bar_dir2);
// modification on /dir2/foo and /dir2/bar
final Path bar1_dir2 = new Path(foo_dir2, "bar1");
hdfs.setReplication(bar1_dir2, REPL_1);
hdfs.setReplication(bar_dir2, REPL_1);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// create snapshots
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s22");
SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s33");
// 2. /dir2/foo -> /dir3/foo
final Path foo_dir3 = new Path(sdir3, "foo");
hdfs.rename(foo_dir2, foo_dir3);
final Path bar_dir3 = new Path(sdir3, "bar");
hdfs.rename(bar_dir2, bar_dir3);
// modification on /dir3/foo
final Path bar1_dir3 = new Path(foo_dir3, "bar1");
hdfs.setReplication(bar1_dir3, REPL_2);
hdfs.setReplication(bar_dir3, REPL_2);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// create snapshots
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s222");
SnapshotTestHelper.createSnapshot(hdfs, sdir3, "s333");
// check
final Path bar1_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
"foo/bar1");
final Path bar1_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22",
"foo/bar1");
final Path bar1_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333",
"foo/bar1");
final Path bar_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
"bar");
final Path bar_s22 = SnapshotTestHelper.getSnapshotPath(sdir2, "s22",
"bar");
final Path bar_s333 = SnapshotTestHelper.getSnapshotPath(sdir3, "s333",
"bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
FileStatus statusBar1 = hdfs.getFileStatus(bar1_s1);
assertEquals(REPL, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_dir3);
assertEquals(REPL_2, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2, statusBar1.getReplication());
FileStatus statusBar = hdfs.getFileStatus(bar_s1);
assertEquals(REPL, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_dir3);
assertEquals(REPL_2, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2, statusBar.getReplication());
// 3. /dir3/foo -> /dir2/foo
hdfs.rename(foo_dir3, foo_dir2);
hdfs.rename(bar_dir3, bar_dir2);
// modification on /dir2/foo
hdfs.setReplication(bar1_dir2, REPL);
hdfs.setReplication(bar_dir2, REPL);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// create snapshots
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2222");
// check
final Path bar1_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222",
"foo/bar1");
final Path bar_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222",
"bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
statusBar1 = hdfs.getFileStatus(bar1_s1);
assertEquals(REPL, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_dir2);
assertEquals(REPL, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_s22);
assertEquals(REPL_1, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_s333);
assertEquals(REPL_2, statusBar1.getReplication());
statusBar1 = hdfs.getFileStatus(bar1_s2222);
assertEquals(REPL, statusBar1.getReplication());
statusBar = hdfs.getFileStatus(bar_s1);
assertEquals(REPL, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_dir2);
assertEquals(REPL, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_s22);
assertEquals(REPL_1, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_s333);
assertEquals(REPL_2, statusBar.getReplication());
statusBar = hdfs.getFileStatus(bar_s2222);
assertEquals(REPL, statusBar.getReplication());
// 4. /dir2/foo -> /dir1/foo
hdfs.rename(foo_dir2, foo_dir1);
hdfs.rename(bar_dir2, bar_dir1);
// check the internal details
INodeDirectory sdir1Node = fsdir.getINode(sdir1.toString()).asDirectory();
INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
INodeDirectory sdir3Node = fsdir.getINode(sdir3.toString()).asDirectory();
INodeReference fooRef = fsdir.getINode4Write(foo_dir1.toString())
.asReference();
INodeReference.WithCount fooWithCount = (WithCount) fooRef.getReferredINode();
// 5 references: s1, s22, s333, s2222, current tree of sdir1
assertEquals(5, fooWithCount.getReferenceCount());
INodeDirectory foo = fooWithCount.asDirectory();
List<DirectoryDiff> fooDiffs = foo.getDiffs().asList();
assertEquals(4, fooDiffs.size());
Snapshot s2222 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2222"));
Snapshot s333 = sdir3Node.getSnapshot(DFSUtil.string2Bytes("s333"));
Snapshot s22 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s22"));
Snapshot s1 = sdir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(), fooDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(), fooDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
INodeFile bar1 = fsdir.getINode4Write(bar1_dir1.toString()).asFile();
List<FileDiff> bar1Diffs = bar1.getDiffs().asList();
assertEquals(3, bar1Diffs.size());
assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
assertEquals(s22.getId(), bar1Diffs.get(1).getSnapshotId());
assertEquals(s1.getId(), bar1Diffs.get(0).getSnapshotId());
INodeReference barRef = fsdir.getINode4Write(bar_dir1.toString())
.asReference();
INodeReference.WithCount barWithCount = (WithCount) barRef.getReferredINode();
// 5 references: s1, s22, s333, s2222, current tree of sdir1
assertEquals(5, barWithCount.getReferenceCount());
INodeFile bar = barWithCount.asFile();
List<FileDiff> barDiffs = bar.getDiffs().asList();
assertEquals(4, barDiffs.size());
assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
assertEquals(s333.getId(), barDiffs.get(2).getSnapshotId());
assertEquals(s22.getId(), barDiffs.get(1).getSnapshotId());
assertEquals(s1.getId(), barDiffs.get(0).getSnapshotId());
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// delete foo
hdfs.delete(foo_dir1, true);
hdfs.delete(bar_dir1, true);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// check
final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111",
"foo/bar1");
final Path bar_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111",
"bar");
assertTrue(hdfs.exists(bar1_s1));
assertTrue(hdfs.exists(bar1_s22));
assertTrue(hdfs.exists(bar1_s333));
assertTrue(hdfs.exists(bar1_s2222));
assertFalse(hdfs.exists(bar1_s1111));
assertTrue(hdfs.exists(bar_s1));
assertTrue(hdfs.exists(bar_s22));
assertTrue(hdfs.exists(bar_s333));
assertTrue(hdfs.exists(bar_s2222));
assertFalse(hdfs.exists(bar_s1111));
final Path foo_s2222 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2222",
"foo");
fooRef = fsdir.getINode(foo_s2222.toString()).asReference();
fooWithCount = (WithCount) fooRef.getReferredINode();
assertEquals(4, fooWithCount.getReferenceCount());
foo = fooWithCount.asDirectory();
fooDiffs = foo.getDiffs().asList();
assertEquals(4, fooDiffs.size());
assertEquals(s2222.getId(), fooDiffs.get(3).getSnapshotId());
bar1Diffs = bar1.getDiffs().asList();
assertEquals(3, bar1Diffs.size());
assertEquals(s333.getId(), bar1Diffs.get(2).getSnapshotId());
barRef = fsdir.getINode(bar_s2222.toString()).asReference();
barWithCount = (WithCount) barRef.getReferredINode();
assertEquals(4, barWithCount.getReferenceCount());
bar = barWithCount.asFile();
barDiffs = bar.getDiffs().asList();
assertEquals(4, barDiffs.size());
assertEquals(s2222.getId(), barDiffs.get(3).getSnapshotId());
}
/**
* Test rename from a non-snapshottable dir to a snapshottable dir
*/
@Test (timeout=60000)
public void testRenameFromNonSDir2SDir() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir2, snap1);
final Path newfoo = new Path(sdir2, "foo");
hdfs.rename(foo, newfoo);
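    // the source tree has no snapshots, so the rename should not create an
    // INodeReference; foo stays a plain INodeDirectory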
INode fooNode = fsdir.getINode4Write(newfoo.toString());
assertTrue(fooNode instanceof INodeDirectory);
}
/**
* Test rename where the src/dst directories are both snapshottable
   * directories without snapshots. In such a case we need to update the
* snapshottable dir list in SnapshotManager.
*/
@Test (timeout=60000)
public void testRenameAndUpdateSnapshottableDirs() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(sdir2, "bar");
hdfs.mkdirs(foo);
hdfs.mkdirs(bar);
hdfs.allowSnapshot(foo);
SnapshotTestHelper.createSnapshot(hdfs, bar, snap1);
assertEquals(2, fsn.getSnapshottableDirListing().length);
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
long fooId = fooNode.getId();
try {
hdfs.rename(foo, bar, Rename.OVERWRITE);
fail("Expect exception since " + bar
+ " is snapshottable and already has snapshots");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(bar.toString()
+ " is snapshottable and already has snapshots", e);
}
hdfs.deleteSnapshot(bar, snap1);
hdfs.rename(foo, bar, Rename.OVERWRITE);
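    // bar has been overwritten by foo, so the snapshottable dir list should
    // contain a single entry that keeps foo's original inode id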
SnapshottableDirectoryStatus[] dirs = fsn.getSnapshottableDirListing();
assertEquals(1, dirs.length);
assertEquals(bar, dirs[0].getFullPath());
assertEquals(fooId, dirs[0].getDirStatus().getFileId());
}
/**
   * After a rename, delete the snapshots in the src tree.
*/
@Test
public void testRenameDirAndDeleteSnapshot_2() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir2, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s3");
final Path newfoo = new Path(sdir1, "foo");
hdfs.rename(foo, newfoo);
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
final Path bar2 = new Path(newfoo, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
hdfs.createSnapshot(sdir1, "s4");
hdfs.delete(newfoo, true);
final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
"foo/bar2");
assertTrue(hdfs.exists(bar2_s4));
final Path bar_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4",
"foo/bar");
assertTrue(hdfs.exists(bar_s4));
    // delete snapshot s4. The diff of s4 should be combined into s3
hdfs.deleteSnapshot(sdir1, "s4");
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar");
assertFalse(hdfs.exists(bar_s3));
bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar");
assertTrue(hdfs.exists(bar_s3));
Path bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
bar2_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo/bar2");
assertFalse(hdfs.exists(bar2_s3));
// delete snapshot s3
hdfs.deleteSnapshot(sdir2, "s3");
final Path bar_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2",
"foo/bar");
assertTrue(hdfs.exists(bar_s2));
// check internal details
INodeDirectory sdir2Node = fsdir.getINode(sdir2.toString()).asDirectory();
Snapshot s2 = sdir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo");
INodeReference fooRef = fsdir.getINode(foo_s2.toString()).asReference();
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount fooWC = (WithCount) fooRef.getReferredINode();
assertEquals(1, fooWC.getReferenceCount());
INodeDirectory fooDir = fooWC.getReferredINode().asDirectory();
List<DirectoryDiff> diffs = fooDir.getDiffs().asList();
assertEquals(1, diffs.size());
assertEquals(s2.getId(), diffs.get(0).getSnapshotId());
// restart the cluster and check fsimage
restartClusterAndCheckImage(true);
// delete snapshot s2.
hdfs.deleteSnapshot(sdir2, "s2");
assertFalse(hdfs.exists(bar_s2));
restartClusterAndCheckImage(true);
// make sure the whole referred subtree has been destroyed
QuotaCounts q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3, q.getNameSpace());
assertEquals(0, q.getStorageSpace());
hdfs.deleteSnapshot(sdir1, "s1");
restartClusterAndCheckImage(true);
q = fsdir.getRoot().getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3, q.getNameSpace());
assertEquals(0, q.getStorageSpace());
}
/**
   * Rename a file and then append to the same file.
*/
@Test
public void testRenameAndAppend() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir1, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, snap1);
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
INode fooRef = fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
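    // appending through the DstReference should keep the reference type and
    // mark the referred file as under construction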
FSDataOutputStream out = hdfs.append(foo2);
try {
byte[] content = new byte[1024];
(new Random()).nextBytes(content);
out.write(content);
fooRef = fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
INodeFile fooNode = fooRef.asFile();
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.isUnderConstruction());
} finally {
if (out != null) {
out.close();
}
}
fooRef = fsdir.getINode4Write(foo2.toString());
assertTrue(fooRef instanceof INodeReference.DstReference);
INodeFile fooNode = fooRef.asFile();
assertTrue(fooNode.isWithSnapshot());
assertFalse(fooNode.isUnderConstruction());
restartClusterAndCheckImage(true);
}
/**
   * Test the undo section of rename. The file/dir to be renamed is created
   * before taking the snapshot.
*/
@Test
public void testRenameUndo_1() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
final Path dir2file = new Path(sdir2, "file");
DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
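    // the mocked dir2 rejects addChild, so the rename below must fail and
    // trigger the undo logic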
final Path newfoo = new Path(sdir2, "foo");
boolean result = hdfs.rename(foo, newfoo);
assertFalse(result);
// check the current internal details
INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, both the created and deleted list of sdir1
// should be empty
ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode.isDirectory() && fooNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
INode fooNode_s1 = fsdir.getINode(foo_s1.toString());
assertTrue(fooNode_s1 == fooNode);
// check sdir2
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
/**
   * Test the undo section of rename. The file/dir to be renamed is created
   * after taking the snapshot.
*/
@Test
public void testRenameUndo_2() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
final Path dir2file = new Path(sdir2, "file");
DFSTestUtil.createFile(hdfs, dir2file, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
// create foo after taking snapshot
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
INodeDirectory dir2 = fsdir.getINode4Write(sdir2.toString()).asDirectory();
INodeDirectory mockDir2 = spy(dir2);
doReturn(false).when(mockDir2).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir2, mockDir2, fsdir.getINodeMap());
final Path newfoo = new Path(sdir2, "foo");
boolean result = hdfs.rename(foo, newfoo);
assertFalse(result);
// check the current internal details
INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
ReadOnlyList<INode> dir1Children = dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir1Children.size());
assertEquals(foo.getName(), dir1Children.get(0).getLocalName());
List<DirectoryDiff> dir1Diffs = dir1Node.getDiffs().asList();
assertEquals(1, dir1Diffs.size());
assertEquals(s1.getId(), dir1Diffs.get(0).getSnapshotId());
// after the undo of rename, the created list of sdir1 should contain
// 1 element
ChildrenDiff childrenDiff = dir1Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
INode fooNode = fsdir.getINode4Write(foo.toString());
assertTrue(fooNode instanceof INodeDirectory);
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1", "foo");
assertFalse(hdfs.exists(foo_s1));
// check sdir2
assertFalse(hdfs.exists(newfoo));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
assertFalse(dir2Node.isWithSnapshot());
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
assertEquals(dir2file.getName(), dir2Children.get(0).getLocalName());
}
/**
   * Test the undo section of a second rename of the same directory.
*/
@Test
public void testRenameUndo_3() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path sdir3 = new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3 = spy(dir3);
doReturn(false).when(mockDir3).addChild((INode) anyObject(), anyBoolean(),
Mockito.anyInt());
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
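    // the mocked dir3 rejects addChild, so renames into dir3 will fail and
    // be undone, while the rename into dir2 (not mocked) succeeds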
final Path foo_dir2 = new Path(sdir2, "foo2");
final Path foo_dir3 = new Path(sdir3, "foo3");
hdfs.rename(foo, foo_dir2);
boolean result = hdfs.rename(foo_dir2, foo_dir3);
assertFalse(result);
// check the current internal details
INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
Snapshot s2 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s2"));
ReadOnlyList<INode> dir2Children = dir2Node
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
List<DirectoryDiff> dir2Diffs = dir2Node.getDiffs().asList();
assertEquals(1, dir2Diffs.size());
assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
ChildrenDiff childrenDiff = dir2Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(sdir2, "s2", "foo2");
assertFalse(hdfs.exists(foo_s2));
INode fooNode = fsdir.getINode4Write(foo_dir2.toString());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
assertTrue(fooNode instanceof INodeReference.DstReference);
List<DirectoryDiff> fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(1, fooDiffs.size());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
// create snapshot on sdir2 and rename again
hdfs.createSnapshot(sdir2, "s3");
result = hdfs.rename(foo_dir2, foo_dir3);
assertFalse(result);
// check internal details again
dir2Node = fsdir.getINode4Write(sdir2.toString()).asDirectory();
Snapshot s3 = dir2Node.getSnapshot(DFSUtil.string2Bytes("s3"));
fooNode = fsdir.getINode4Write(foo_dir2.toString());
dir2Children = dir2Node.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, dir2Children.size());
dir2Diffs = dir2Node.getDiffs().asList();
assertEquals(2, dir2Diffs.size());
assertEquals(s2.getId(), dir2Diffs.get(0).getSnapshotId());
assertEquals(s3.getId(), dir2Diffs.get(1).getSnapshotId());
childrenDiff = dir2Diffs.get(0).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(1, childrenDiff.getList(ListType.CREATED).size());
assertTrue(childrenDiff.getList(ListType.CREATED).get(0) == fooNode);
childrenDiff = dir2Diffs.get(1).getChildrenDiff();
assertEquals(0, childrenDiff.getList(ListType.DELETED).size());
assertEquals(0, childrenDiff.getList(ListType.CREATED).size());
final Path foo_s3 = SnapshotTestHelper.getSnapshotPath(sdir2, "s3", "foo2");
assertFalse(hdfs.exists(foo_s2));
assertTrue(hdfs.exists(foo_s3));
assertTrue(fooNode instanceof INodeReference.DstReference);
fooDiffs = fooNode.asDirectory().getDiffs().asList();
assertEquals(2, fooDiffs.size());
assertEquals(s1.getId(), fooDiffs.get(0).getSnapshotId());
assertEquals(s3.getId(), fooDiffs.get(1).getSnapshotId());
}
/**
   * Test undo where the dst node being overwritten is a reference node
*/
@Test
public void testRenameUndo_4() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path sdir3 = new Path("/dir3");
hdfs.mkdirs(sdir1);
hdfs.mkdirs(sdir2);
hdfs.mkdirs(sdir3);
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
final Path foo2 = new Path(sdir2, "foo2");
hdfs.mkdirs(foo2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
// rename foo2 to foo3, so that foo3 will be a reference node
final Path foo3 = new Path(sdir3, "foo3");
hdfs.rename(foo2, foo3);
INode foo3Node = fsdir.getINode4Write(foo3.toString());
assertTrue(foo3Node.isReference());
INodeDirectory dir3 = fsdir.getINode4Write(sdir3.toString()).asDirectory();
INodeDirectory mockDir3 = spy(dir3);
// fail the rename but succeed in undo
doReturn(false).when(mockDir3).addChild((INode) Mockito.isNull(),
anyBoolean(), Mockito.anyInt());
Mockito.when(mockDir3.addChild((INode) Mockito.isNotNull(), anyBoolean(),
Mockito.anyInt())).thenReturn(false).thenCallRealMethod();
INodeDirectory root = fsdir.getINode4Write("/").asDirectory();
root.replaceChild(dir3, mockDir3, fsdir.getINodeMap());
foo3Node.setParent(mockDir3);
try {
hdfs.rename(foo, foo3, Rename.OVERWRITE);
fail("the rename from " + foo + " to " + foo3 + " should fail");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains("rename from " + foo + " to "
+ foo3 + " failed.", e);
}
// make sure the undo is correct
final INode foo3Node_undo = fsdir.getINode4Write(foo3.toString());
assertSame(foo3Node, foo3Node_undo);
INodeReference.WithCount foo3_wc = (WithCount) foo3Node.asReference()
.getReferredINode();
assertEquals(2, foo3_wc.getReferenceCount());
assertSame(foo3Node, foo3_wc.getParentReference());
}
/**
   * Test the rename undo when adding the renamed node to the dst tree fails
   * with a quota error.
*/
@Test
public void testRenameUndo_5() throws Exception {
final Path test = new Path("/test");
final Path dir1 = new Path(test, "dir1");
final Path dir2 = new Path(test, "dir2");
final Path subdir2 = new Path(dir2, "subdir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subdir2);
final Path foo = new Path(dir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
// set ns quota of dir2 to 4, so the current remaining is 2 (already has
// dir2, and subdir2)
hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
final Path foo2 = new Path(subdir2, foo.getName());
FSDirectory fsdir2 = Mockito.spy(fsdir);
Mockito.doThrow(new NSQuotaExceededException("fake exception")).when(fsdir2)
.addLastINode((INodesInPath) Mockito.anyObject(),
(INode) Mockito.anyObject(), Mockito.anyBoolean());
Whitebox.setInternalState(fsn, "dir", fsdir2);
// rename /test/dir1/foo to /test/dir2/subdir2/foo.
// FSDirectory#verifyQuota4Rename will pass since the remaining quota is 2.
// However, the rename operation will fail since we let addLastINode throw
// NSQuotaExceededException
boolean rename = hdfs.rename(foo, foo2);
assertFalse(rename);
// check the undo
assertTrue(hdfs.exists(foo));
assertTrue(hdfs.exists(bar));
INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
.asDirectory();
List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode fooNode = childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
INode barNode = fsdir2.getINode4Write(bar.toString());
assertTrue(barNode.getClass() == INodeFile.class);
assertSame(fooNode, barNode.getParent());
List<DirectoryDiff> diffList = dir1Node
.getDiffs().asList();
assertEquals(1, diffList.size());
DirectoryDiff diff = diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// check dir2
INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
assertEquals(2, counts.getNameSpace());
assertEquals(0, counts.getStorageSpace());
childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode subdir2Node = childrenList.get(0);
assertSame(dir2Node, subdir2Node.getParent());
assertSame(subdir2Node, fsdir2.getINode4Write(subdir2.toString()));
diffList = dir2Node.getDiffs().asList();
assertEquals(1, diffList.size());
diff = diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
/**
   * Test the rename undo when removing the dst node fails
*/
@Test
public void testRenameUndo_6() throws Exception {
final Path test = new Path("/test");
final Path dir1 = new Path(test, "dir1");
final Path dir2 = new Path(test, "dir2");
final Path sub_dir2 = new Path(dir2, "subdir");
final Path subsub_dir2 = new Path(sub_dir2, "subdir");
hdfs.mkdirs(dir1);
hdfs.mkdirs(subsub_dir2);
final Path foo = new Path(dir1, "foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
// set ns quota of dir2 to 4, so the current remaining is 1 (already has
// dir2, sub_dir2, and subsub_dir2)
hdfs.setQuota(dir2, 4, Long.MAX_VALUE - 1);
FSDirectory fsdir2 = Mockito.spy(fsdir);
Mockito.doThrow(new RuntimeException("fake exception")).when(fsdir2)
.removeLastINode((INodesInPath) Mockito.anyObject());
Whitebox.setInternalState(fsn, "dir", fsdir2);
// rename /test/dir1/foo to /test/dir2/sub_dir2/subsub_dir2.
    // FSDirectory#verifyQuota4Rename will pass since foo is only counted as 1
    // against the NS quota. However, the rename operation will fail when
    // removing subsub_dir2.
try {
hdfs.rename(foo, subsub_dir2, Rename.OVERWRITE);
fail("Expect QuotaExceedException");
} catch (Exception e) {
String msg = "fake exception";
GenericTestUtils.assertExceptionContains(msg, e);
}
// check the undo
assertTrue(hdfs.exists(foo));
INodeDirectory dir1Node = fsdir2.getINode4Write(dir1.toString())
.asDirectory();
List<INode> childrenList = ReadOnlyList.Util.asList(dir1Node
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode fooNode = childrenList.get(0);
assertTrue(fooNode.asDirectory().isWithSnapshot());
assertSame(dir1Node, fooNode.getParent());
List<DirectoryDiff> diffList = dir1Node
.getDiffs().asList();
assertEquals(1, diffList.size());
DirectoryDiff diff = diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
// check dir2
INodeDirectory dir2Node = fsdir2.getINode4Write(dir2.toString()).asDirectory();
assertTrue(dir2Node.isSnapshottable());
QuotaCounts counts = dir2Node.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
assertEquals(3, counts.getNameSpace());
assertEquals(0, counts.getStorageSpace());
childrenList = ReadOnlyList.Util.asList(dir2Node.asDirectory()
.getChildrenList(Snapshot.CURRENT_STATE_ID));
assertEquals(1, childrenList.size());
INode subdir2Node = childrenList.get(0);
assertSame(dir2Node, subdir2Node.getParent());
assertSame(subdir2Node, fsdir2.getINode4Write(sub_dir2.toString()));
INode subsubdir2Node = fsdir2.getINode4Write(subsub_dir2.toString());
assertTrue(subsubdir2Node.getClass() == INodeDirectory.class);
assertSame(subdir2Node, subsubdir2Node.getParent());
    diffList = dir2Node.getDiffs().asList();
assertEquals(1, diffList.size());
diff = diffList.get(0);
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
}
/**
* Test rename to an invalid name (xxx/.snapshot)
*/
@Test
public void testRenameUndo_7() throws Exception {
final Path root = new Path("/");
final Path foo = new Path(root, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
// create a snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
// rename bar to /foo/.snapshot which is invalid
final Path invalid = new Path(foo, HdfsConstants.DOT_SNAPSHOT_DIR);
try {
hdfs.rename(bar, invalid);
fail("expect exception since invalid name is used for rename");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("\"" +
HdfsConstants.DOT_SNAPSHOT_DIR + "\" is a reserved name", e);
}
// check
INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
.asDirectory();
INodeDirectory fooNode = fsdir.getINode4Write(foo.toString()).asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
DirectoryDiff diff = diffList.get(0);
// this diff is generated while renaming
Snapshot s1 = rootNode.getSnapshot(DFSUtil.string2Bytes(snap1));
assertEquals(s1.getId(), diff.getSnapshotId());
// after undo, the diff should be empty
assertTrue(diff.getChildrenDiff().getList(ListType.DELETED).isEmpty());
assertTrue(diff.getChildrenDiff().getList(ListType.CREATED).isEmpty());
    // bar was converted to a file with the snapshot feature while renaming
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
assertSame(barNode, children.get(0));
assertSame(fooNode, barNode.getParent());
List<FileDiff> barDiffList = barNode.getDiffs().asList();
assertEquals(1, barDiffList.size());
FileDiff barDiff = barDiffList.get(0);
assertEquals(s1.getId(), barDiff.getSnapshotId());
    // restart cluster multiple times to make sure the fsimage and edit log are
// correct. Note that when loading fsimage, foo and bar will be converted
// back to normal INodeDirectory and INodeFile since they do not store any
// snapshot data
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPL).build();
cluster.waitActive();
restartClusterAndCheckImage(true);
}
/**
   * Test a rename where the real quota usage of the dst tree exceeds the
   * quota limit after the rename; the rename itself still succeeds.
*/
@Test
public void testRenameExceedQuota() throws Exception {
final Path test = new Path("/test");
final Path dir1 = new Path(test, "dir1");
final Path dir2 = new Path(test, "dir2");
final Path sub_dir2 = new Path(dir2, "subdir");
final Path subfile_dir2 = new Path(sub_dir2, "subfile");
hdfs.mkdirs(dir1);
DFSTestUtil.createFile(hdfs, subfile_dir2, BLOCKSIZE, REPL, SEED);
final Path foo = new Path(dir1, "foo");
DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
    // set ns quota of dir2 to 5, so the current remaining is 1 (already has
// dir2, sub_dir2, subfile_dir2, and s2)
hdfs.setQuota(dir2, 5, Long.MAX_VALUE - 1);
// rename /test/dir1/foo to /test/dir2/sub_dir2/subfile_dir2.
    // FSDirectory#verifyQuota4Rename will pass since foo is only counted as 1
    // against the NS quota. The rename operation will succeed while the real
    // quota usage of dir2 will become 7 (dir2, s2 in dir2, sub_dir2, s2 in
    // sub_dir2, subfile_dir2 in deleted list, new subfile, s1 in new subfile).
hdfs.rename(foo, subfile_dir2, Rename.OVERWRITE);
// check dir2
INode dir2Node = fsdir.getINode4Write(dir2.toString());
assertTrue(dir2Node.asDirectory().isSnapshottable());
QuotaCounts counts = dir2Node.computeQuotaUsage(
fsdir.getBlockStoragePolicySuite());
assertEquals(4, counts.getNameSpace());
assertEquals(BLOCKSIZE * REPL * 2, counts.getStorageSpace());
}
@Test
public void testRename2PreDescendant() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
hdfs.mkdirs(bar);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, snap1);
// /dir1/foo/bar -> /dir2/bar
final Path bar2 = new Path(sdir2, "bar");
hdfs.rename(bar, bar2);
// /dir1/foo -> /dir2/bar/foo
final Path foo2 = new Path(bar2, "foo");
hdfs.rename(foo, foo2);
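    // foo now lives under its former descendant bar; deleting snap1
    // afterwards must still clean up the rename references correctly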
restartClusterAndCheckImage(true);
// delete snap1
hdfs.deleteSnapshot(sdir1, snap1);
restartClusterAndCheckImage(true);
}
/**
   * Move a directory to its prior descendant.
*/
@Test
public void testRename2PreDescendant_2() throws Exception {
final Path root = new Path("/");
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
final Path file1InBar = new Path(bar, "file1");
final Path file2InBar = new Path(bar, "file2");
hdfs.mkdirs(bar);
hdfs.mkdirs(sdir2);
DFSTestUtil.createFile(hdfs, file1InBar, BLOCKSIZE, REPL, SEED);
DFSTestUtil.createFile(hdfs, file2InBar, BLOCKSIZE, REPL, SEED);
hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
// create snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
// delete file1InBar
hdfs.delete(file1InBar, true);
// create another snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
// delete file2InBar
hdfs.delete(file2InBar, true);
// /dir1/foo/bar -> /dir2/bar
final Path bar2 = new Path(sdir2, "bar2");
hdfs.rename(bar, bar2);
// /dir1/foo -> /dir2/bar/foo
final Path foo2 = new Path(bar2, "foo2");
hdfs.rename(foo, foo2);
restartClusterAndCheckImage(true);
// delete snapshot snap2
hdfs.deleteSnapshot(root, snap2);
// after deleteing snap2, the WithName node "bar", which originally was
// stored in the deleted list of "foo" for snap2, is moved to its deleted
// list for snap1. In that case, it will not be counted when calculating
// quota for "foo". However, we do not update this quota usage change while
// deleting snap2.
restartClusterAndCheckImage(false);
}
/**
   * Move a directory to its prior descendant.
*/
@Test
public void testRename2PreDescendant_3() throws Exception {
final Path root = new Path("/");
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
final Path fileInBar = new Path(bar, "file");
hdfs.mkdirs(bar);
hdfs.mkdirs(sdir2);
DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
// create snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, snap1);
// delete fileInBar
hdfs.delete(fileInBar, true);
// create another snapshot on root
SnapshotTestHelper.createSnapshot(hdfs, root, snap2);
// /dir1/foo/bar -> /dir2/bar
final Path bar2 = new Path(sdir2, "bar2");
hdfs.rename(bar, bar2);
// /dir1/foo -> /dir2/bar/foo
final Path foo2 = new Path(bar2, "foo2");
hdfs.rename(foo, foo2);
restartClusterAndCheckImage(true);
// delete snapshot snap1
hdfs.deleteSnapshot(root, snap1);
restartClusterAndCheckImage(true);
}
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> delete the renamed dir
* -> delete snapshot s on dst tree
*
* Make sure we destroy everything created after the rename under the renamed
* dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_3() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// delete foo2
hdfs.delete(foo2, true);
// delete s3
hdfs.deleteSnapshot(sdir2, "s3");
// check
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(3, q1.getNameSpace());
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(1, q2.getNameSpace());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(1, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(1, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
assertEquals(0, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
restartClusterAndCheckImage(true);
}
/**
* After the following operations:
* Rename a dir -> create a snapshot s on dst tree -> rename the renamed dir
* again -> delete snapshot s on dst tree
*
* Make sure we only delete the snapshot s under the renamed dir.
*/
@Test
public void testRenameDirAndDeleteSnapshot_4() throws Exception {
final Path sdir1 = new Path("/dir1");
final Path sdir2 = new Path("/dir2");
final Path foo = new Path(sdir1, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
hdfs.mkdirs(sdir2);
SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sdir2, "s2");
final Path foo2 = new Path(sdir2, "foo");
hdfs.rename(foo, foo2);
// create two new files under foo2
final Path bar2 = new Path(foo2, "bar2");
DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED);
final Path bar3 = new Path(foo2, "bar3");
DFSTestUtil.createFile(hdfs, bar3, BLOCKSIZE, REPL, SEED);
// create a new snapshot on sdir2
hdfs.createSnapshot(sdir2, "s3");
// rename foo2 again
hdfs.rename(foo2, foo);
// delete snapshot s3
hdfs.deleteSnapshot(sdir2, "s3");
    // check quota usage and reference counts
final INodeDirectory dir1Node = fsdir.getINode4Write(sdir1.toString())
.asDirectory();
// sdir1 + s1 + foo_s1 (foo) + foo (foo + s1 + bar~bar3)
QuotaCounts q1 = dir1Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(7, q1.getNameSpace());
final INodeDirectory dir2Node = fsdir.getINode4Write(sdir2.toString())
.asDirectory();
QuotaCounts q2 = dir2Node.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(1, q2.getNameSpace());
final Path foo_s1 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1",
foo.getName());
final INode fooRef = fsdir.getINode(foo_s1.toString());
assertTrue(fooRef instanceof INodeReference.WithName);
INodeReference.WithCount wc =
(WithCount) fooRef.asReference().getReferredINode();
assertEquals(2, wc.getReferenceCount());
INodeDirectory fooNode = wc.getReferredINode().asDirectory();
ReadOnlyList<INode> children = fooNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(3, children.size());
assertEquals(bar.getName(), children.get(0).getLocalName());
assertEquals(bar2.getName(), children.get(1).getLocalName());
assertEquals(bar3.getName(), children.get(2).getLocalName());
List<DirectoryDiff> diffList = fooNode.getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s1 = dir1Node.getSnapshot(DFSUtil.string2Bytes("s1"));
assertEquals(s1.getId(), diffList.get(0).getSnapshotId());
ChildrenDiff diff = diffList.get(0).getChildrenDiff();
// bar2 and bar3 in the created list
assertEquals(2, diff.getList(ListType.CREATED).size());
assertEquals(0, diff.getList(ListType.DELETED).size());
final INode fooRef2 = fsdir.getINode4Write(foo.toString());
assertTrue(fooRef2 instanceof INodeReference.DstReference);
INodeReference.WithCount wc2 =
(WithCount) fooRef2.asReference().getReferredINode();
assertSame(wc, wc2);
assertSame(fooRef2, wc.getParentReference());
restartClusterAndCheckImage(true);
}
/**
* This test demonstrates that
* {@link INodeDirectory#removeChild}
* and
* {@link INodeDirectory#addChild}
* should use {@link INode#isInLatestSnapshot} to check if the
* added/removed child should be recorded in snapshots.
*/
@Test
public void testRenameDirAndDeleteSnapshot_5() throws Exception {
final Path dir1 = new Path("/dir1");
final Path dir2 = new Path("/dir2");
final Path dir3 = new Path("/dir3");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
hdfs.mkdirs(dir3);
final Path foo = new Path(dir1, "foo");
hdfs.mkdirs(foo);
SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
final Path bar = new Path(foo, "bar");
// create file bar, and foo will become an INodeDirectory with snapshot
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
// delete snapshot s1. now foo is not in any snapshot
hdfs.deleteSnapshot(dir1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
// rename /dir1/foo to /dir2/foo
final Path foo2 = new Path(dir2, foo.getName());
hdfs.rename(foo, foo2);
// rename /dir2/foo/bar to /dir3/foo/bar
final Path bar2 = new Path(dir2, "foo/bar");
final Path bar3 = new Path(dir3, "bar");
hdfs.rename(bar2, bar3);
    // delete /dir2/foo. Since it is not in any snapshot, its destroy function
    // will be called. If removeChild and addChild in INodeDirectory (with
    // snapshot) did not use isInLatestSnapshot, the file bar would wrongly be
    // recorded in the deleted list of foo and destroyed along with it.
hdfs.delete(foo2, true);
// check if /dir3/bar still exists
assertTrue(hdfs.exists(bar3));
INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
}
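  /**
   * Illustrative sketch only (added for clarity; not part of the original
   * suite): the check described in the Javadoc of
   * {@link #testRenameDirAndDeleteSnapshot_5()}. A child being removed from
   * or added to a directory needs a record in the snapshot diff only when it
   * is captured by the latest snapshot; otherwise it is safe to handle the
   * child directly.
   */
  private static boolean needsSnapshotDiffRecord(INode child,
      int latestSnapshotId) {
    return child.isInLatestSnapshot(latestSnapshotId);
  }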
  /**
   * Rename and snapshot deletion under the same snapshottable directory.
   */
@Test
public void testRenameDirAndDeleteSnapshot_6() throws Exception {
final Path test = new Path("/test");
final Path dir1 = new Path(test, "dir1");
final Path dir2 = new Path(test, "dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo = new Path(dir2, "foo");
final Path bar = new Path(foo, "bar");
final Path file = new Path(bar, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
// take a snapshot on /test
SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
// delete /test/dir2/foo/bar/file after snapshot s0, so that there is a
// snapshot copy recorded in bar
hdfs.delete(file, true);
// rename foo from dir2 to dir1
final Path newfoo = new Path(dir1, foo.getName());
hdfs.rename(foo, newfoo);
final Path foo_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
"dir2/foo");
assertTrue("the snapshot path " + foo_s0 + " should exist",
hdfs.exists(foo_s0));
// delete snapshot s0. The deletion will first go down through dir1, and
// find foo in the created list of dir1. Then it will use null as the prior
// snapshot and continue the snapshot deletion process in the subtree of
// foo. We need to make sure the snapshot s0 can be deleted cleanly in the
// foo subtree.
hdfs.deleteSnapshot(test, "s0");
    // check the internal state
assertFalse("after deleting s0, " + foo_s0 + " should not exist",
hdfs.exists(foo_s0));
INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
.asDirectory();
assertTrue("the diff list of " + dir2
+ " should be empty after deleting s0", dir2Node.getDiffs().asList()
.isEmpty());
assertTrue(hdfs.exists(newfoo));
INode fooRefNode = fsdir.getINode4Write(newfoo.toString());
assertTrue(fooRefNode instanceof INodeReference.DstReference);
INodeDirectory fooNode = fooRefNode.asDirectory();
    // fooNode should still be an INodeDirectory with the snapshot feature,
    // since recordModification is called before the rename
assertTrue(fooNode.isWithSnapshot());
assertTrue(fooNode.getDiffs().asList().isEmpty());
INodeDirectory barNode = fooNode.getChildrenList(Snapshot.CURRENT_STATE_ID)
.get(0).asDirectory();
    // bar should also be an INodeDirectory with the snapshot feature, and both
    // its diff list and children list should be empty
assertTrue(barNode.getDiffs().asList().isEmpty());
assertTrue(barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).isEmpty());
restartClusterAndCheckImage(true);
}
/**
* Unit test for HDFS-4842.
*/
@Test
public void testRenameDirAndDeleteSnapshot_7() throws Exception {
fsn.getSnapshotManager().setAllowNestedSnapshots(true);
final Path test = new Path("/test");
final Path dir1 = new Path(test, "dir1");
final Path dir2 = new Path(test, "dir2");
hdfs.mkdirs(dir1);
hdfs.mkdirs(dir2);
final Path foo = new Path(dir2, "foo");
final Path bar = new Path(foo, "bar");
final Path file = new Path(bar, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPL, SEED);
    // take snapshots s0 and s1 on /test
SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
SnapshotTestHelper.createSnapshot(hdfs, test, "s1");
// delete file so we have a snapshot copy for s1 in bar
hdfs.delete(file, true);
// create another snapshot on dir2
SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
// rename foo from dir2 to dir1
final Path newfoo = new Path(dir1, foo.getName());
hdfs.rename(foo, newfoo);
// delete snapshot s1
hdfs.deleteSnapshot(test, "s1");
// make sure the snapshot copy of file in s1 is merged to s0. For
// HDFS-4842, we need to make sure that we do not wrongly use s2 as the
// prior snapshot of s1.
final Path file_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
"foo/bar/file");
assertFalse(hdfs.exists(file_s2));
final Path file_s0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
"dir2/foo/bar/file");
assertTrue(hdfs.exists(file_s0));
// check dir1: foo should be in the created list of s0
INodeDirectory dir1Node = fsdir.getINode4Write(dir1.toString())
.asDirectory();
List<DirectoryDiff> dir1DiffList = dir1Node.getDiffs().asList();
assertEquals(1, dir1DiffList.size());
List<INode> dList = dir1DiffList.get(0).getChildrenDiff()
.getList(ListType.DELETED);
assertTrue(dList.isEmpty());
List<INode> cList = dir1DiffList.get(0).getChildrenDiff()
.getList(ListType.CREATED);
assertEquals(1, cList.size());
INode cNode = cList.get(0);
INode fooNode = fsdir.getINode4Write(newfoo.toString());
assertSame(cNode, fooNode);
// check foo and its subtree
final Path newbar = new Path(newfoo, bar.getName());
INodeDirectory barNode = fsdir.getINode4Write(newbar.toString())
.asDirectory();
assertSame(fooNode.asDirectory(), barNode.getParent());
// bar should only have a snapshot diff for s0
List<DirectoryDiff> barDiffList = barNode.getDiffs().asList();
assertEquals(1, barDiffList.size());
DirectoryDiff diff = barDiffList.get(0);
INodeDirectory testNode = fsdir.getINode4Write(test.toString())
.asDirectory();
Snapshot s0 = testNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertEquals(s0.getId(), diff.getSnapshotId());
// and file should be stored in the deleted list of this snapshot diff
assertEquals("file", diff.getChildrenDiff().getList(ListType.DELETED)
.get(0).getLocalName());
// check dir2: a WithName instance for foo should be in the deleted list
// of the snapshot diff for s2
INodeDirectory dir2Node = fsdir.getINode4Write(dir2.toString())
.asDirectory();
List<DirectoryDiff> dir2DiffList = dir2Node.getDiffs().asList();
    // dir2Node should contain 1 snapshot diff for s2
assertEquals(1, dir2DiffList.size());
dList = dir2DiffList.get(0).getChildrenDiff().getList(ListType.DELETED);
assertEquals(1, dList.size());
final Path foo_s2 = SnapshotTestHelper.getSnapshotPath(dir2, "s2",
foo.getName());
INodeReference.WithName fooNode_s2 =
(INodeReference.WithName) fsdir.getINode(foo_s2.toString());
assertSame(dList.get(0), fooNode_s2);
assertSame(fooNode.asReference().getReferredINode(),
fooNode_s2.getReferredINode());
restartClusterAndCheckImage(true);
}
/**
* Make sure we clean the whole subtree under a DstReference node after
* deleting a snapshot.
* see HDFS-5476.
*/
@Test
public void testCleanDstReference() throws Exception {
final Path test = new Path("/test");
final Path foo = new Path(test, "foo");
final Path bar = new Path(foo, "bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
    // create the file after s0 so that it is not included in s0
final Path fileInBar = new Path(bar, "file");
DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED);
// rename foo --> foo2
final Path foo2 = new Path(test, "foo2");
hdfs.rename(foo, foo2);
// create snapshot s1, note the file is included in s1
hdfs.createSnapshot(test, "s1");
// delete bar and foo2
hdfs.delete(new Path(foo2, "bar"), true);
hdfs.delete(foo2, true);
final Path sfileInBar = SnapshotTestHelper.getSnapshotPath(test, "s1",
"foo2/bar/file");
assertTrue(hdfs.exists(sfileInBar));
hdfs.deleteSnapshot(test, "s1");
assertFalse(hdfs.exists(sfileInBar));
restartClusterAndCheckImage(true);
// make sure the file under bar is deleted
final Path barInS0 = SnapshotTestHelper.getSnapshotPath(test, "s0",
"foo/bar");
INodeDirectory barNode = fsdir.getINode(barInS0.toString()).asDirectory();
assertEquals(0, barNode.getChildrenList(Snapshot.CURRENT_STATE_ID).size());
List<DirectoryDiff> diffList = barNode.getDiffs().asList();
assertEquals(1, diffList.size());
DirectoryDiff diff = diffList.get(0);
assertEquals(0, diff.getChildrenDiff().getList(ListType.DELETED).size());
assertEquals(0, diff.getChildrenDiff().getList(ListType.CREATED).size());
}
  /**
   * Renaming an under-construction file in a snapshot should not fail NN
   * restart after a checkpoint. Unit test for HDFS-5425.
   */
@Test
public void testRenameUCFileInSnapshot() throws Exception {
final Path test = new Path("/test");
final Path foo = new Path(test, "foo");
final Path bar = new Path(foo, "bar");
hdfs.mkdirs(foo);
    // create a file and keep it under construction.
hdfs.create(bar);
SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
// rename bar --> bar2
final Path bar2 = new Path(foo, "bar2");
hdfs.rename(bar, bar2);
// save namespace and restart
restartClusterAndCheckImage(true);
}
  /**
   * Similar to testRenameUCFileInSnapshot, but renames first and then appends
   * to the file without closing it. Unit test for HDFS-5425.
   */
@Test
public void testAppendFileAfterRenameInSnapshot() throws Exception {
final Path test = new Path("/test");
final Path foo = new Path(test, "foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
SnapshotTestHelper.createSnapshot(hdfs, test, "s0");
// rename bar --> bar2
final Path bar2 = new Path(foo, "bar2");
hdfs.rename(bar, bar2);
    // append to the file and keep it under construction.
FSDataOutputStream out = hdfs.append(bar2);
out.writeByte(0);
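    // hsync with UPDATE_LENGTH so the NameNode persists the new length while
    // the stream deliberately stays open (the file remains under construction).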
((DFSOutputStream) out.getWrappedStream()).hsync(
EnumSet.of(SyncFlag.UPDATE_LENGTH));
// save namespace and restart
restartClusterAndCheckImage(true);
}
@Test
public void testRenameWithOverWrite() throws Exception {
final Path root = new Path("/");
final Path foo = new Path(root, "foo");
final Path file1InFoo = new Path(foo, "file1");
final Path file2InFoo = new Path(foo, "file2");
final Path file3InFoo = new Path(foo, "file3");
DFSTestUtil.createFile(hdfs, file1InFoo, 1L, REPL, SEED);
DFSTestUtil.createFile(hdfs, file2InFoo, 1L, REPL, SEED);
DFSTestUtil.createFile(hdfs, file3InFoo, 1L, REPL, SEED);
final Path bar = new Path(root, "bar");
hdfs.mkdirs(bar);
SnapshotTestHelper.createSnapshot(hdfs, root, "s0");
// move file1 from foo to bar
final Path fileInBar = new Path(bar, "file1");
hdfs.rename(file1InFoo, fileInBar);
// rename bar to newDir
final Path newDir = new Path(root, "newDir");
hdfs.rename(bar, newDir);
// move file2 from foo to newDir
final Path file2InNewDir = new Path(newDir, "file2");
hdfs.rename(file2InFoo, file2InNewDir);
// move file3 from foo to newDir and rename it to file1, this will overwrite
// the original file1
final Path file1InNewDir = new Path(newDir, "file1");
hdfs.rename(file3InFoo, file1InNewDir, Rename.OVERWRITE);
SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
SnapshotDiffReport report = hdfs.getSnapshotDiffReport(root, "s0", "s1");
LOG.info("DiffList is \n\"" + report.toString() + "\"");
List<DiffReportEntry> entries = report.getDiffList();
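    // 7 entries are expected: MODIFY for the root, foo and bar; a DELETE for
    // foo/file1, whose rename target was later overwritten; and the three
    // renames of bar, file2 and file3.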
assertEquals(7, entries.size());
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, "", null));
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, foo.getName(), null));
assertTrue(existsInDiffReport(entries, DiffType.MODIFY, bar.getName(), null));
assertTrue(existsInDiffReport(entries, DiffType.DELETE, "foo/file1", null));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, "bar", "newDir"));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file2", "newDir/file2"));
assertTrue(existsInDiffReport(entries, DiffType.RENAME, "foo/file3", "newDir/file1"));
}
}
| 96,673
| 39.080431
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestDisallowModifyROSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.util.ArrayList;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
/**
 * This class tests that snapshots are read-only. Any operation that would
 * modify a file or directory reached through a ".snapshot" path must be
 * rejected with a SnapshotAccessControlException.
 */
public class TestDisallowModifyROSnapshot {
private final static Path dir = new Path("/TestSnapshot");
private final static Path sub1 = new Path(dir, "sub1");
private final static Path sub2 = new Path(dir, "sub2");
protected static Configuration conf;
protected static MiniDFSCluster cluster;
protected static FSNamesystem fsn;
protected static DistributedFileSystem fs;
/**
* The list recording all previous snapshots. Each element in the array
* records a snapshot root.
*/
protected static ArrayList<Path> snapshotList = new ArrayList<Path>();
static Path objInSnapshot = null;
@BeforeClass
public static void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fs = cluster.getFileSystem();
Path path1 = new Path(sub1, "dir1");
assertTrue(fs.mkdirs(path1));
Path path2 = new Path(sub2, "dir2");
assertTrue(fs.mkdirs(path2));
SnapshotTestHelper.createSnapshot(fs, sub1, "testSnapshot");
objInSnapshot = SnapshotTestHelper.getSnapshotPath(sub1, "testSnapshot",
"dir1");
}
@AfterClass
public static void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testSetReplication() throws Exception {
fs.setReplication(objInSnapshot, (short) 1);
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testSetPermission() throws Exception {
fs.setPermission(objInSnapshot, new FsPermission("777"));
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testSetOwner() throws Exception {
fs.setOwner(objInSnapshot, "username", "groupname");
}
@Test (timeout=60000)
public void testRename() throws Exception {
try {
fs.rename(objInSnapshot, new Path("/invalid/path"));
fail("Didn't throw SnapshotAccessControlException");
} catch (SnapshotAccessControlException e) { /* Ignored */ }
try {
fs.rename(sub2, objInSnapshot);
fail("Didn't throw SnapshotAccessControlException");
} catch (SnapshotAccessControlException e) { /* Ignored */ }
try {
fs.rename(sub2, objInSnapshot, (Options.Rename) null);
fail("Didn't throw SnapshotAccessControlException");
} catch (SnapshotAccessControlException e) { /* Ignored */ }
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testDelete() throws Exception {
fs.delete(objInSnapshot, true);
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testQuota() throws Exception {
fs.setQuota(objInSnapshot, 100, 100);
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testSetTime() throws Exception {
fs.setTimes(objInSnapshot, 100, 100);
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testCreate() throws Exception {
@SuppressWarnings("deprecation")
DFSClient dfsclient = new DFSClient(conf);
dfsclient.create(objInSnapshot.toString(), true);
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testAppend() throws Exception {
fs.append(objInSnapshot, 65535, null);
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testMkdir() throws Exception {
fs.mkdirs(objInSnapshot, new FsPermission("777"));
}
@Test(timeout=60000, expected = SnapshotAccessControlException.class)
public void testCreateSymlink() throws Exception {
@SuppressWarnings("deprecation")
DFSClient dfsclient = new DFSClient(conf);
dfsclient.createSymlink(sub2.toString(), "/TestSnapshot/sub1/.snapshot",
false);
}
}
| 5,605
| 34.935897
| 76
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotFileLength.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.PrintStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.hdfs.AppendTestUtil;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import static org.hamcrest.CoreMatchers.is;
import static org.hamcrest.CoreMatchers.not;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.ToolRunner;
public class TestSnapshotFileLength {
private static final long SEED = 0;
private static final short REPLICATION = 1;
private static final int BLOCKSIZE = 1024;
private static final Configuration conf = new Configuration();
private static MiniDFSCluster cluster;
private static DistributedFileSystem hdfs;
private final Path dir = new Path("/TestSnapshotFileLength");
private final Path sub = new Path(dir, "sub1");
private final String file1Name = "file1";
private final String snapshot1 = "snapshot1";
@Before
public void setUp() throws Exception {
conf.setLong(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, BLOCKSIZE);
conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
/**
* Test that we cannot read a file beyond its snapshot length
* when accessing it via a snapshot path.
*
*/
@Test (timeout=300000)
public void testSnapshotfileLength() throws Exception {
hdfs.mkdirs(sub);
int bytesRead;
byte[] buffer = new byte[BLOCKSIZE * 8];
int origLen = BLOCKSIZE + 1;
int toAppend = BLOCKSIZE;
FSDataInputStream fis = null;
FileStatus fileStatus = null;
// Create and write a file.
Path file1 = new Path(sub, file1Name);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, 0, BLOCKSIZE, REPLICATION, SEED);
DFSTestUtil.appendFile(hdfs, file1, origLen);
// Create a snapshot on the parent directory.
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub, snapshot1);
Path file1snap1
= SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
final FileChecksum snapChksum1 = hdfs.getFileChecksum(file1snap1);
assertThat("file and snapshot file checksums are not equal",
hdfs.getFileChecksum(file1), is(snapChksum1));
// Append to the file.
FSDataOutputStream out = hdfs.append(file1);
// Nothing has been appended yet. All checksums should still be equal.
    // HDFS-8150: Fetching the checksum of a file under construction should fail
try {
hdfs.getFileChecksum(file1);
fail("getFileChecksum should fail for files "
+ "with blocks under construction");
} catch (IOException ie) {
assertTrue(ie.getMessage().contains(
"Fail to get checksum, since file " + file1
+ " is under construction."));
}
assertThat("snapshot checksum (post-open for append) has changed",
hdfs.getFileChecksum(file1snap1), is(snapChksum1));
try {
AppendTestUtil.write(out, 0, toAppend);
// Test reading from snapshot of file that is open for append
byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs, file1snap1);
assertThat("Wrong data size in snapshot.",
dataFromSnapshot.length, is(origLen));
// Verify that checksum didn't change
assertThat("snapshot checksum (post-append) has changed",
hdfs.getFileChecksum(file1snap1), is(snapChksum1));
} finally {
out.close();
}
assertThat("file and snapshot file checksums (post-close) are equal",
hdfs.getFileChecksum(file1), not(snapChksum1));
assertThat("snapshot file checksum (post-close) has changed",
hdfs.getFileChecksum(file1snap1), is(snapChksum1));
// Make sure we can read the entire file via its non-snapshot path.
fileStatus = hdfs.getFileStatus(file1);
assertThat(fileStatus.getLen(), is((long) origLen + toAppend));
fis = hdfs.open(file1);
bytesRead = fis.read(0, buffer, 0, buffer.length);
assertThat(bytesRead, is(origLen + toAppend));
fis.close();
// Try to open the file via its snapshot path.
fis = hdfs.open(file1snap1);
fileStatus = hdfs.getFileStatus(file1snap1);
assertThat(fileStatus.getLen(), is((long) origLen));
// Make sure we can only read up to the snapshot length.
bytesRead = fis.read(0, buffer, 0, buffer.length);
assertThat(bytesRead, is(origLen));
fis.close();
byte[] dataFromSnapshot = DFSTestUtil.readFileBuffer(hdfs,
file1snap1);
assertThat("Wrong data size in snapshot.",
dataFromSnapshot.length, is(origLen));
}
  /**
   * Added as part of HDFS-5343.
   * Test that the cat command on a snapshot path cannot read a file beyond
   * the snapshot file length.
   * @throws Exception
   */
@Test (timeout = 600000)
public void testSnapshotFileLengthWithCatCommand() throws Exception {
FSDataInputStream fis = null;
FileStatus fileStatus = null;
int bytesRead;
byte[] buffer = new byte[BLOCKSIZE * 8];
hdfs.mkdirs(sub);
Path file1 = new Path(sub, file1Name);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub, snapshot1);
DFSTestUtil.appendFile(hdfs, file1, BLOCKSIZE);
// Make sure we can read the entire file via its non-snapshot path.
fileStatus = hdfs.getFileStatus(file1);
assertEquals("Unexpected file length", BLOCKSIZE * 2, fileStatus.getLen());
fis = hdfs.open(file1);
bytesRead = fis.read(buffer, 0, buffer.length);
assertEquals("Unexpected # bytes read", BLOCKSIZE * 2, bytesRead);
fis.close();
Path file1snap1 =
SnapshotTestHelper.getSnapshotPath(sub, snapshot1, file1Name);
fis = hdfs.open(file1snap1);
fileStatus = hdfs.getFileStatus(file1snap1);
    assertEquals("Unexpected snapshot file length", BLOCKSIZE,
        fileStatus.getLen());
// Make sure we can only read up to the snapshot length.
bytesRead = fis.read(buffer, 0, buffer.length);
assertEquals("Unexpected # bytes read", BLOCKSIZE, bytesRead);
fis.close();
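    // Capture stdout/stderr so the size of the shell's -cat output can be
    // measured; the original streams are restored in the finally block.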
PrintStream outBackup = System.out;
PrintStream errBackup = System.err;
ByteArrayOutputStream bao = new ByteArrayOutputStream();
System.setOut(new PrintStream(bao));
System.setErr(new PrintStream(bao));
    // Make sure we can cat the file only up to the snapshot length
FsShell shell = new FsShell();
try {
ToolRunner.run(conf, shell, new String[] { "-cat",
"/TestSnapshotFileLength/sub1/.snapshot/snapshot1/file1" });
assertEquals("Unexpected # bytes from -cat", BLOCKSIZE, bao.size());
} finally {
System.setOut(outBackup);
System.setErr(errBackup);
}
}
}
| 8,385
| 35.620087
| 84
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotManager.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.spy;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
import org.apache.hadoop.util.StringUtils;
import org.junit.Assert;
import org.junit.Test;
/**
* Testing snapshot manager functionality.
*/
public class TestSnapshotManager {
private static final int testMaxSnapshotLimit = 7;
/**
* Test that the global limit on snapshots is honored.
*/
@Test (timeout=10000)
public void testSnapshotLimits() throws Exception {
// Setup mock objects for SnapshotManager.createSnapshot.
//
INodeDirectory ids = mock(INodeDirectory.class);
FSDirectory fsdir = mock(FSDirectory.class);
INodesInPath iip = mock(INodesInPath.class);
SnapshotManager sm = spy(new SnapshotManager(fsdir));
doReturn(ids).when(sm).getSnapshottableRoot((INodesInPath) anyObject());
doReturn(testMaxSnapshotLimit).when(sm).getMaxSnapshotID();
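    // getMaxSnapshotID is stubbed down from its real (much larger) value so
    // that ID rollover can be reached with only a handful of snapshots.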
// Create testMaxSnapshotLimit snapshots. These should all succeed.
//
for (Integer i = 0; i < testMaxSnapshotLimit; ++i) {
sm.createSnapshot(iip, "dummy", i.toString());
}
// Attempt to create one more snapshot. This should fail due to snapshot
// ID rollover.
//
try {
sm.createSnapshot(iip, "dummy", "shouldFailSnapshot");
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
}
// Delete a snapshot to free up a slot.
//
sm.deleteSnapshot(iip, "", mock(INode.ReclaimContext.class));
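    // Snapshot IDs increase monotonically and are never reused, so deleting
    // a snapshot does not roll the ID counter back.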
// Attempt to create a snapshot again. It should still fail due
// to snapshot ID rollover.
//
try {
sm.createSnapshot(iip, "dummy", "shouldFailSnapshot2");
Assert.fail("Expected SnapshotException not thrown");
} catch (SnapshotException se) {
Assert.assertTrue(
StringUtils.toLowerCase(se.getMessage()).contains("rollover"));
}
}
}
| 3,208
| 34.655556
| 76
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
/**
* Test cases for snapshot-related information in blocksMap.
*/
public class TestSnapshotBlocksMap {
private static final long seed = 0;
private static final short REPLICATION = 3;
private static final int BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
protected Configuration conf;
protected MiniDFSCluster cluster;
protected FSNamesystem fsn;
FSDirectory fsdir;
BlockManager blockmanager;
protected DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
blockmanager = fsn.getBlockManager();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
void assertAllNull(INodeFile inode, Path path, String[] snapshots) throws Exception {
Assert.assertNull(inode.getBlocks());
assertINodeNull(path.toString());
assertINodeNullInSnapshots(path, snapshots);
}
void assertINodeNull(String path) throws Exception {
Assert.assertNull(fsdir.getINode(path));
}
void assertINodeNullInSnapshots(Path path, String... snapshots) throws Exception {
for(String s : snapshots) {
assertINodeNull(SnapshotTestHelper.getSnapshotPath(
path.getParent(), s, path.getName()).toString());
}
}
static INodeFile assertBlockCollection(String path, int numBlocks,
final FSDirectory dir, final BlockManager blkManager) throws Exception {
final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
assertEquals(numBlocks, file.getBlocks().length);
for(BlockInfo b : file.getBlocks()) {
assertBlockCollection(blkManager, file, b);
}
return file;
}
static void assertBlockCollection(final BlockManager blkManager,
final INodeFile file, final BlockInfo b) {
Assert.assertSame(b, blkManager.getStoredBlock(b));
Assert.assertSame(file, blkManager.getBlockCollection(b));
Assert.assertSame(file, b.getBlockCollection());
}
/**
* Test deleting a file with snapshots. Need to check the blocksMap to make
* sure the corresponding record is updated correctly.
*/
@Test (timeout=60000)
public void testDeletionWithSnapshots() throws Exception {
Path file0 = new Path(sub1, "file0");
Path file1 = new Path(sub1, "file1");
Path sub2 = new Path(sub1, "sub2");
Path file2 = new Path(sub2, "file2");
Path file3 = new Path(sub1, "file3");
Path file4 = new Path(sub1, "file4");
Path file5 = new Path(sub1, "file5");
    // Create files under sub1
DFSTestUtil.createFile(hdfs, file0, 4*BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, 2*BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 3*BLOCKSIZE, REPLICATION, seed);
// Normal deletion
{
final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir,
blockmanager);
BlockInfo[] blocks = f2.getBlocks();
hdfs.delete(sub2, true);
// The INode should have been removed from the blocksMap
for(BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
}
// Create snapshots for sub1
final String[] snapshots = {"s0", "s1", "s2"};
DFSTestUtil.createFile(hdfs, file3, 5*BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
DFSTestUtil.createFile(hdfs, file4, 1*BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
DFSTestUtil.createFile(hdfs, file5, 7*BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
    // set replication so that the inode is modified after being snapshotted
    // and acquires the snapshot feature (verified below via isWithSnapshot)
{
INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir,
blockmanager);
Assert.assertSame(INodeFile.class, f1.getClass());
hdfs.setReplication(file1, (short)2);
f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
assertTrue(f1.isWithSnapshot());
assertFalse(f1.isUnderConstruction());
}
// Check the block information for file0
final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir,
blockmanager);
BlockInfo[] blocks0 = f0.getBlocks();
// Also check the block information for snapshot of file0
Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0",
file0.getName());
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
// Delete file0
hdfs.delete(file0, true);
    // Make sure the blocks of file0 are still in the blocksMap
for(BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
// Compare the INode in the blocksMap with INodes for snapshots
String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1",
file0.getName()).toString();
assertBlockCollection(s1f0, 4, fsdir, blockmanager);
// Delete snapshot s1
hdfs.deleteSnapshot(sub1, "s1");
    // Make sure the blocks of file0 are still in the blocksMap
for(BlockInfo b : blocks0) {
assertNotNull(blockmanager.getBlockCollection(b));
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
try {
INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
} catch (IOException e) {
assertExceptionContains("File does not exist: " + s1f0, e);
}
}
  /*
   * Try to read a file that exists in a snapshot but has been deleted from
   * its original place, after a checkpoint and NameNode restart.
   * See HDFS-5427.
   */
@Test(timeout = 30000)
public void testReadSnapshotFileWithCheckpoint() throws Exception {
Path foo = new Path("/foo");
hdfs.mkdirs(foo);
hdfs.allowSnapshot(foo);
Path bar = new Path("/foo/bar");
DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
hdfs.createSnapshot(foo, "s1");
assertTrue(hdfs.delete(bar, true));
// checkpoint
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
// restart namenode to load snapshot files from fsimage
cluster.restartNameNode(true);
String snapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
DFSTestUtil.readFile(hdfs, new Path(snapshotPath));
}
  /*
   * Try to read a file that exists in a snapshot but has been renamed into a
   * different snapshottable directory and then deleted, after a checkpoint
   * and NameNode restart. See HDFS-5427.
   */
@Test(timeout = 30000)
public void testReadRenamedSnapshotFileWithCheckpoint() throws Exception {
final Path foo = new Path("/foo");
final Path foo2 = new Path("/foo2");
hdfs.mkdirs(foo);
hdfs.mkdirs(foo2);
hdfs.allowSnapshot(foo);
hdfs.allowSnapshot(foo2);
final Path bar = new Path(foo, "bar");
final Path bar2 = new Path(foo2, "bar");
DFSTestUtil.createFile(hdfs, bar, 100, (short) 2, 100024L);
hdfs.createSnapshot(foo, "s1");
// rename to another snapshottable directory and take snapshot
assertTrue(hdfs.rename(bar, bar2));
hdfs.createSnapshot(foo2, "s2");
    // delete the renamed file so that its blocks are referenced only via the
    // snapshots
assertTrue(hdfs.delete(bar2, true));
// checkpoint
NameNode nameNode = cluster.getNameNode();
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
NameNodeAdapter.leaveSafeMode(nameNode);
// restart namenode to load snapshot files from fsimage
cluster.restartNameNode(true);
// file in first snapshot
String barSnapshotPath = Snapshot.getSnapshotPath(foo.toString(), "s1/bar");
DFSTestUtil.readFile(hdfs, new Path(barSnapshotPath));
// file in second snapshot after rename+delete
String bar2SnapshotPath = Snapshot.getSnapshotPath(foo2.toString(),
"s2/bar");
DFSTestUtil.readFile(hdfs, new Path(bar2SnapshotPath));
}
  /**
   * Make sure we delete the 0-sized block when deleting an
   * INodeFileUCWithSnapshot.
   */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
hdfs.append(bar);
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
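    // Allocate a second block directly through the NN RPC while the file is
    // open for append; no data is ever written to it, so it stays 0-sized.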
cluster.getNameNodeRpc()
.addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
null, barNode.getId(), null);
SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
barNode = fsdir.getINode4Write(bar.toString()).asFile();
blks = barNode.getBlocks();
assertEquals(2, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
assertEquals(0, blks[1].getNumBytes());
hdfs.delete(bar, true);
final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1",
bar.getName());
barNode = fsdir.getINode(sbar.toString()).asFile();
blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
  /**
   * Make sure we delete the 0-sized block when deleting an under-construction
   * file.
   */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
final Path foo = new Path("/foo");
final Path subDir = new Path(foo, "sub");
final Path bar = new Path(subDir, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
hdfs.append(bar);
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
cluster.getNameNodeRpc()
.addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
null, barNode.getId(), null);
SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
barNode = fsdir.getINode4Write(bar.toString()).asFile();
blks = barNode.getBlocks();
assertEquals(2, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
assertEquals(0, blks[1].getNumBytes());
hdfs.delete(subDir, true);
final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
barNode = fsdir.getINode(sbar.toString()).asFile();
blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
  /**
   * 1. Rename an under-construction file with a 0-sized block after taking a
   * snapshot.
   * 2. Delete the renamed directory.
   * Make sure we delete the 0-sized block.
   * See HDFS-5476.
   */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
final Path foo = new Path("/foo");
final Path subDir = new Path(foo, "sub");
final Path bar = new Path(subDir, "bar");
DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
hdfs.append(bar);
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
cluster.getNameNodeRpc()
.addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
null, barNode.getId(), null);
SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
// rename bar
final Path bar2 = new Path(subDir, "bar2");
hdfs.rename(bar, bar2);
INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
blks = bar2Node.getBlocks();
assertEquals(2, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
assertEquals(0, blks[1].getNumBytes());
// delete subDir
hdfs.delete(subDir, true);
final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
barNode = fsdir.getINode(sbar.toString()).asFile();
blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
  /**
   * Make sure that deleting a non-zero-length file whose snapshot copy is
   * zero-length works, and that the NN can still save an fsimage afterwards.
   */
@Test
public void testDeletionOfLaterBlocksWithZeroSizeFirstBlock() throws Exception {
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
final byte[] testData = "foo bar baz".getBytes();
// Create a zero-length file.
DFSTestUtil.createFile(hdfs, bar, 0, REPLICATION, 0L);
assertEquals(0, fsdir.getINode4Write(bar.toString()).asFile().getBlocks().length);
// Create a snapshot that includes that file.
SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
// Extend that file.
FSDataOutputStream out = hdfs.append(bar);
out.write(testData);
out.close();
INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
BlockInfo[] blks = barNode.getBlocks();
assertEquals(1, blks.length);
assertEquals(testData.length, blks[0].getNumBytes());
// Delete the file.
hdfs.delete(bar, true);
// Now make sure that the NN can still save an fsimage successfully.
cluster.getNameNode().getRpcServer().setSafeMode(
SafeModeAction.SAFEMODE_ENTER, false);
cluster.getNameNode().getRpcServer().saveNamespace();
}
}
| 16,274
| 36.242563
| 92
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.File;
import java.io.IOException;
import java.io.PrintStream;
import java.io.RandomAccessFile;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Random;
import org.apache.commons.io.output.NullOutputStream;
import org.apache.commons.logging.impl.Log4JLogger;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node;
import org.apache.hadoop.hdfs.tools.offlineImageViewer.PBImageXmlWriter;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* This class tests snapshot functionality. One or multiple snapshots are
* created. The snapshotted directory is changed and verification is done to
 * ensure snapshots remain unchanged.
*/
public class TestSnapshot {
{
((Log4JLogger)INode.LOG).getLogger().setLevel(Level.ALL);
SnapshotTestHelper.disableLogs();
}
private static final long seed;
private static final Random random;
static {
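    // Derive the seed from the clock but print it, so a failing run can be
    // reproduced by hard-coding the logged value.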
seed = Time.now();
random = new Random(seed);
System.out.println("Random seed: " + seed);
}
protected static final short REPLICATION = 3;
protected static final int BLOCKSIZE = 1024;
/** The number of times snapshots are created for a snapshottable directory */
public static final int SNAPSHOT_ITERATION_NUMBER = 20;
/** Height of directory tree used for testing */
public static final int DIRECTORY_TREE_LEVEL = 5;
protected Configuration conf;
protected static MiniDFSCluster cluster;
protected static FSNamesystem fsn;
protected static FSDirectory fsdir;
protected DistributedFileSystem hdfs;
private static final String testDir =
System.getProperty("test.build.data", "build/test/data");
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* The list recording all previous snapshots. Each element in the array
* records a snapshot root.
*/
protected static final ArrayList<Path> snapshotList = new ArrayList<Path>();
/**
* Check {@link SnapshotTestHelper.TestDirectoryTree}
*/
private TestDirectoryTree dirTree;
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
dirTree = new TestDirectoryTree(DIRECTORY_TREE_LEVEL, hdfs);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
static int modificationCount = 0;
/**
* Make changes (modification, deletion, creation) to the current files/dir.
* Then check if the previous snapshots are still correct.
*
   * @param modifications Modifications to be applied to the current dir.
*/
private void modifyCurrentDirAndCheckSnapshots(Modification[] modifications)
throws Exception {
for (Modification modification : modifications) {
System.out.println(++modificationCount + ") " + modification);
modification.loadSnapshots();
modification.modify();
modification.checkSnapshots();
}
}
/**
* Create two snapshots in each iteration. Each time we will create a snapshot
   * for the top node, then randomly pick a dir in the tree and create a
   * snapshot for it.
   *
   * Finally, check that the snapshots were created correctly.
*/
protected TestDirectoryTree.Node[] createSnapshots() throws Exception {
TestDirectoryTree.Node[] nodes = new TestDirectoryTree.Node[2];
// Each time we will create a snapshot for the top level dir
Path root = SnapshotTestHelper.createSnapshot(hdfs,
dirTree.topNode.nodePath, nextSnapshotName());
snapshotList.add(root);
nodes[0] = dirTree.topNode;
SnapshotTestHelper.checkSnapshotCreation(hdfs, root, nodes[0].nodePath);
// Then randomly pick one dir from the tree (cannot be the top node) and
// create snapshot for it
ArrayList<TestDirectoryTree.Node> excludedList =
new ArrayList<TestDirectoryTree.Node>();
excludedList.add(nodes[0]);
nodes[1] = dirTree.getRandomDirNode(random, excludedList);
root = SnapshotTestHelper.createSnapshot(hdfs, nodes[1].nodePath,
nextSnapshotName());
snapshotList.add(root);
SnapshotTestHelper.checkSnapshotCreation(hdfs, root, nodes[1].nodePath);
return nodes;
}
private File getDumpTreeFile(String dir, String suffix) {
return new File(dir, String.format("dumptree_%s", suffix));
}
/**
   * Restart the cluster to check edit log application and fsimage
   * saving/loading
*/
private void checkFSImage() throws Exception {
File fsnBefore = getDumpTreeFile(testDir, "before");
File fsnMiddle = getDumpTreeFile(testDir, "middle");
File fsnAfter = getDumpTreeFile(testDir, "after");
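    // Dump the tree three times: before any restart, after edit-log replay
    // (middle), and after saving and reloading the fsimage (after); all three
    // dumps must be identical.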
SnapshotTestHelper.dumpTree2File(fsdir, fsnBefore);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// later check fsnMiddle to see if the edit log is applied correctly
SnapshotTestHelper.dumpTree2File(fsdir, fsnMiddle);
// save namespace and restart cluster
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.shutdown();
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPLICATION).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
// dump the namespace loaded from fsimage
SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter);
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle, true);
SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true);
}
/**
* Main test, where we will go in the following loop:
* <pre>
* Create snapshot and check the creation <--+
* -> Change the current/live files/dir |
* -> Check previous snapshots -----------------+
* </pre>
*/
@Test
public void testSnapshot() throws Throwable {
try {
runTestSnapshot(SNAPSHOT_ITERATION_NUMBER);
} catch(Throwable t) {
SnapshotTestHelper.LOG.info("FAILED", t);
SnapshotTestHelper.dumpTree("FAILED", cluster);
throw t;
}
}
/**
   * Test if the OfflineImageViewerPB can correctly parse an fsimage containing
* snapshots
*/
@Test
public void testOfflineImageViewer() throws Exception {
runTestSnapshot(1);
// retrieve the fsimage. Note that we already save namespace to fsimage at
// the end of each iteration of runTestSnapshot.
File originalFsimage = FSImageTestUtil.findLatestImageFile(
FSImageTestUtil.getFSImage(
cluster.getNameNode()).getStorage().getStorageDir(0));
assertNotNull("Didn't generate or can't find fsimage", originalFsimage);
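    // Parse the image with the XML writer, discarding the output; the test
    // only verifies that a snapshot-bearing fsimage can be parsed without
    // throwing.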
PrintStream o = new PrintStream(NullOutputStream.NULL_OUTPUT_STREAM);
PBImageXmlWriter v = new PBImageXmlWriter(new Configuration(), o);
v.visit(new RandomAccessFile(originalFsimage, "r"));
}
private void runTestSnapshot(int iteration) throws Exception {
for (int i = 0; i < iteration; i++) {
// create snapshot and check the creation
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
TestDirectoryTree.Node[] ssNodes = createSnapshots();
      // prepare the modifications for the snapshotted dirs;
      // we cover the top dir, the other newly snapshotted dir, and a random dir
ArrayList<TestDirectoryTree.Node> excludedList =
new ArrayList<TestDirectoryTree.Node>();
TestDirectoryTree.Node[] modNodes =
new TestDirectoryTree.Node[ssNodes.length + 1];
for (int n = 0; n < ssNodes.length; n++) {
modNodes[n] = ssNodes[n];
excludedList.add(ssNodes[n]);
}
modNodes[modNodes.length - 1] = dirTree.getRandomDirNode(random,
excludedList);
Modification[] mods = prepareModifications(modNodes);
// make changes to the directories/files
modifyCurrentDirAndCheckSnapshots(mods);
// also update the metadata of directories
TestDirectoryTree.Node chmodDir = dirTree.getRandomDirNode(random, null);
Modification chmod = new FileChangePermission(chmodDir.nodePath, hdfs,
genRandomPermission());
String[] userGroup = genRandomOwner();
TestDirectoryTree.Node chownDir = dirTree.getRandomDirNode(random,
Arrays.asList(chmodDir));
Modification chown = new FileChown(chownDir.nodePath, hdfs, userGroup[0],
userGroup[1]);
modifyCurrentDirAndCheckSnapshots(new Modification[]{chmod, chown});
// check fsimage saving/loading
checkFSImage();
}
}
/**
* A simple test that updates a sub-directory of a snapshottable directory
* with snapshots
*/
@Test (timeout=60000)
public void testUpdateDirectory() throws Exception {
Path dir = new Path("/dir");
Path sub = new Path(dir, "sub");
Path subFile = new Path(sub, "file");
DFSTestUtil.createFile(hdfs, subFile, BLOCKSIZE, REPLICATION, seed);
FileStatus oldStatus = hdfs.getFileStatus(sub);
hdfs.allowSnapshot(dir);
hdfs.createSnapshot(dir, "s1");
hdfs.setTimes(sub, 100L, 100L);
Path snapshotPath = SnapshotTestHelper.getSnapshotPath(dir, "s1", "sub");
FileStatus snapshotStatus = hdfs.getFileStatus(snapshotPath);
assertEquals(oldStatus.getModificationTime(),
snapshotStatus.getModificationTime());
assertEquals(oldStatus.getAccessTime(), snapshotStatus.getAccessTime());
}
/**
* Test creating a snapshot with illegal name
*/
@Test
public void testCreateSnapshotWithIllegalName() throws Exception {
final Path dir = new Path("/dir");
hdfs.mkdirs(dir);
final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
try {
hdfs.createSnapshot(dir, name1);
fail("Exception expected when an illegal name is given");
} catch (RemoteException e) {
String errorMsg = "Invalid path name Invalid snapshot name: " + name1;
GenericTestUtils.assertExceptionContains(errorMsg, e);
}
final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
for (String badName : badNames) {
try {
hdfs.createSnapshot(dir, badName);
fail("Exception expected when an illegal name is given");
} catch (RemoteException e) {
String errorMsg = "Invalid path name Invalid snapshot name: " + badName ;
GenericTestUtils.assertExceptionContains(errorMsg, e);
}
}
}
/**
* Creating snapshots for a directory that is not snapshottable must fail.
*/
@Test (timeout=60000)
public void testSnapshottableDirectory() throws Exception {
Path dir = new Path("/TestSnapshot/sub");
Path file0 = new Path(dir, "file0");
Path file1 = new Path(dir, "file1");
DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
try {
hdfs.createSnapshot(dir, "s1");
fail("Exception expected: " + dir + " is not snapshottable");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + dir, e);
}
try {
hdfs.deleteSnapshot(dir, "s1");
fail("Exception expected: " + dir + " is not a snapshottale dir");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + dir, e);
}
try {
hdfs.renameSnapshot(dir, "s1", "s2");
fail("Exception expected: " + dir + " is not a snapshottale dir");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + dir, e);
}
}
/**
* Test multiple calls of allowSnapshot and disallowSnapshot, to make sure
* they are idempotent
*/
@Test
public void testAllowAndDisallowSnapshot() throws Exception {
final Path dir = new Path("/dir");
final Path file0 = new Path(dir, "file0");
final Path file1 = new Path(dir, "file1");
DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
hdfs.allowSnapshot(dir);
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
// call allowSnapshot again
hdfs.allowSnapshot(dir);
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
// disallowSnapshot on dir
hdfs.disallowSnapshot(dir);
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
// do it again
hdfs.disallowSnapshot(dir);
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertFalse(dirNode.isSnapshottable());
// same process on root
final Path root = new Path("/");
INodeDirectory rootNode = fsdir.getINode4Write(root.toString())
.asDirectory();
assertTrue(rootNode.isSnapshottable());
// root is snapshottable dir, but with 0 snapshot quota
assertEquals(0, rootNode.getDirectorySnapshottableFeature()
.getSnapshotQuota());
hdfs.allowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
// call allowSnapshot again
hdfs.allowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(DirectorySnapshottableFeature.SNAPSHOT_LIMIT,
rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
    // disallowSnapshot on root
hdfs.disallowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
// do it again
hdfs.disallowSnapshot(root);
rootNode = fsdir.getINode4Write(root.toString()).asDirectory();
assertTrue(rootNode.isSnapshottable());
assertEquals(0, rootNode.getDirectorySnapshottableFeature().getSnapshotQuota());
}
/**
* Prepare a list of modifications. A modification may be a file creation,
* file deletion, or a modification operation such as appending to an existing
* file.
*/
private Modification[] prepareModifications(TestDirectoryTree.Node[] nodes)
throws Exception {
ArrayList<Modification> mList = new ArrayList<Modification>();
for (TestDirectoryTree.Node node : nodes) {
// If the node does not have files in it, create files
if (node.fileList == null) {
node.initFileList(hdfs, node.nodePath.getName(), BLOCKSIZE,
REPLICATION, seed, 6);
}
//
// Modification iterations are as follows:
// Iteration 0 - create:fileList[5], delete:fileList[0],
// append:fileList[1], chmod:fileList[2],
// chown:fileList[3], change_replication:fileList[4].
// Set nullFileIndex to 0
//
// Iteration 1 - create:fileList[0], delete:fileList[1],
// append:fileList[2], chmod:fileList[3],
// chown:fileList[4], change_replication:fileList[5]
// Set nullFileIndex to 1
//
// Iteration 2 - create:fileList[1], delete:fileList[2],
// append:fileList[3], chmod:fileList[4],
      //               chown:fileList[5], change_replication:fileList[0]
// Set nullFileIndex to 2
// ...
//
Modification create = new FileCreation(
node.fileList.get(node.nullFileIndex), hdfs, BLOCKSIZE);
Modification delete = new FileDeletion(
node.fileList.get((node.nullFileIndex + 1) % node.fileList.size()),
hdfs);
Path f = node.fileList.get((node.nullFileIndex + 2) % node.fileList.size());
Modification append = new FileAppend(f, hdfs, BLOCKSIZE);
FileAppendNotClose appendNotClose = new FileAppendNotClose(f, hdfs, BLOCKSIZE);
Modification appendClose = new FileAppendClose(f, hdfs, BLOCKSIZE, appendNotClose);
Modification chmod = new FileChangePermission(
node.fileList.get((node.nullFileIndex + 3) % node.fileList.size()),
hdfs, genRandomPermission());
String[] userGroup = genRandomOwner();
Modification chown = new FileChown(
node.fileList.get((node.nullFileIndex + 4) % node.fileList.size()),
hdfs, userGroup[0], userGroup[1]);
Modification replication = new FileChangeReplication(
node.fileList.get((node.nullFileIndex + 5) % node.fileList.size()),
hdfs, (short) (random.nextInt(REPLICATION) + 1));
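      // Rotate the starting index so every file plays a different role in
      // the next iteration (see the table above).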
node.nullFileIndex = (node.nullFileIndex + 1) % node.fileList.size();
Modification dirChange = new DirCreationOrDeletion(node.nodePath, hdfs,
node, random.nextBoolean());
// dir rename
Node dstParent = dirTree.getRandomDirNode(random, Arrays.asList(nodes));
Modification dirRename = new DirRename(node.nodePath, hdfs, node,
dstParent);
mList.add(create);
mList.add(delete);
mList.add(append);
mList.add(appendNotClose);
mList.add(appendClose);
mList.add(chmod);
mList.add(chown);
mList.add(replication);
mList.add(dirChange);
mList.add(dirRename);
}
return mList.toArray(new Modification[mList.size()]);
}
/**
* @return A random FsPermission
*/
private FsPermission genRandomPermission() {
// randomly select between "rwx" and "rw-"
FsAction u = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
FsAction g = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
FsAction o = random.nextBoolean() ? FsAction.ALL : FsAction.READ_WRITE;
return new FsPermission(u, g, o);
}
/**
   * @return A string array containing two strings: the first indicates the
   * owner, and the second indicates the group
*/
private String[] genRandomOwner() {
String[] userGroup = new String[]{"dr.who", "unknown"};
return userGroup;
}
private static int snapshotCount = 0;
/** @return The next snapshot name */
static String nextSnapshotName() {
return String.format("s-%d", ++snapshotCount);
}
/**
* Base class to present changes applied to current file/dir. A modification
* can be file creation, deletion, or other modifications such as appending on
* an existing file. Three abstract methods need to be implemented by
* subclasses: loadSnapshots() captures the states of snapshots before the
* modification, modify() applies the modification to the current directory,
* and checkSnapshots() verifies the snapshots do not change after the
* modification.
*/
static abstract class Modification {
protected final Path file;
protected final FileSystem fs;
final String type;
Modification(Path file, FileSystem fs, String type) {
this.file = file;
this.fs = fs;
this.type = type;
}
abstract void loadSnapshots() throws Exception;
abstract void modify() throws Exception;
abstract void checkSnapshots() throws Exception;
@Override
public String toString() {
return getClass().getSimpleName() + ":" + type + ":" + file;
}
}
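  /**
   * Illustrative sketch only (not part of the original test driver): the
   * intended life cycle of a Modification. The helper name applyAndVerify is
   * hypothetical; in this test the real driver is
   * modifyCurrentDirAndCheckSnapshots.
   */
  @SuppressWarnings("unused")
  private static void applyAndVerify(Modification mod) throws Exception {
    mod.loadSnapshots();   // capture snapshot state before the change
    mod.modify();          // apply the change to the current tree
    mod.checkSnapshots();  // verify the snapshots are unaffected
  }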
/**
* Modifications that change the file status. We check the FileStatus of
* snapshot files before/after the modification.
*/
static abstract class FileStatusChange extends Modification {
protected final HashMap<Path, FileStatus> statusMap;
FileStatusChange(Path file, FileSystem fs, String type) {
super(file, fs, type);
statusMap = new HashMap<Path, FileStatus>();
}
@Override
void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file);
if (snapshotFile != null) {
if (fs.exists(snapshotFile)) {
FileStatus status = fs.getFileStatus(snapshotFile);
statusMap.put(snapshotFile, status);
} else {
statusMap.put(snapshotFile, null);
}
}
}
}
@Override
void checkSnapshots() throws Exception {
for (Path snapshotFile : statusMap.keySet()) {
FileStatus currentStatus = fs.exists(snapshotFile) ? fs
.getFileStatus(snapshotFile) : null;
FileStatus originalStatus = statusMap.get(snapshotFile);
assertEquals(currentStatus, originalStatus);
if (currentStatus != null) {
String s = null;
if (!currentStatus.toString().equals(originalStatus.toString())) {
s = "FAILED: " + getClass().getSimpleName()
+ ": file=" + file + ", snapshotFile" + snapshotFile
+ "\n\n currentStatus = " + currentStatus
+ "\noriginalStatus = " + originalStatus
+ "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString()
+ "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
SnapshotTestHelper.dumpTree(s, cluster);
}
assertEquals(s, currentStatus.toString(), originalStatus.toString());
}
}
}
}
/**
* Change the file permission
*/
static class FileChangePermission extends FileStatusChange {
private final FsPermission newPermission;
FileChangePermission(Path file, FileSystem fs, FsPermission newPermission) {
super(file, fs, "chmod");
this.newPermission = newPermission;
}
@Override
void modify() throws Exception {
assertTrue(fs.exists(file));
fs.setPermission(file, newPermission);
}
}
/**
* Change the replication factor of file
*/
static class FileChangeReplication extends FileStatusChange {
private final short newReplication;
FileChangeReplication(Path file, FileSystem fs, short replication) {
super(file, fs, "replication");
this.newReplication = replication;
}
@Override
void modify() throws Exception {
assertTrue(fs.exists(file));
fs.setReplication(file, newReplication);
}
}
/**
* Change the owner:group of a file
*/
static class FileChown extends FileStatusChange {
private final String newUser;
private final String newGroup;
FileChown(Path file, FileSystem fs, String user, String group) {
super(file, fs, "chown");
this.newUser = user;
this.newGroup = group;
}
@Override
void modify() throws Exception {
assertTrue(fs.exists(file));
fs.setOwner(file, newUser, newGroup);
}
}
/**
* Appending a specified length to an existing file
*/
static class FileAppend extends Modification {
final int appendLen;
private final HashMap<Path, Long> snapshotFileLengthMap;
FileAppend(Path file, FileSystem fs, int len) {
super(file, fs, "append");
this.appendLen = len;
this.snapshotFileLengthMap = new HashMap<Path, Long>();
}
@Override
void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file);
if (snapshotFile != null) {
long snapshotFileLen = fs.exists(snapshotFile) ? fs.getFileStatus(
snapshotFile).getLen() : -1L;
snapshotFileLengthMap.put(snapshotFile, snapshotFileLen);
}
}
}
@Override
void modify() throws Exception {
assertTrue(fs.exists(file));
DFSTestUtil.appendFile(fs, file, appendLen);
}
@Override
void checkSnapshots() throws Exception {
byte[] buffer = new byte[32];
for (Path snapshotFile : snapshotFileLengthMap.keySet()) {
long currentSnapshotFileLen = fs.exists(snapshotFile) ? fs
.getFileStatus(snapshotFile).getLen() : -1L;
long originalSnapshotFileLen = snapshotFileLengthMap.get(snapshotFile);
String s = null;
if (currentSnapshotFileLen != originalSnapshotFileLen) {
s = "FAILED: " + getClass().getSimpleName()
+ ": file=" + file + ", snapshotFile" + snapshotFile
+ "\n\n currentSnapshotFileLen = " + currentSnapshotFileLen
+ "\noriginalSnapshotFileLen = " + originalSnapshotFileLen
+ "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString()
+ "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
SnapshotTestHelper.dumpTree(s, cluster);
}
assertEquals(s, originalSnapshotFileLen, currentSnapshotFileLen);
        // Read one byte past the end of the snapshot file; it must hit EOF
if (currentSnapshotFileLen != -1L
&& !(this instanceof FileAppendNotClose)) {
FSDataInputStream input = fs.open(snapshotFile);
int readLen = input.read(currentSnapshotFileLen, buffer, 0, 1);
if (readLen != -1) {
s = "FAILED: " + getClass().getSimpleName()
+ ": file=" + file + ", snapshotFile" + snapshotFile
+ "\n\n currentSnapshotFileLen = " + currentSnapshotFileLen
+ "\n readLen = " + readLen
+ "\n\nfile : " + fsdir.getINode(file.toString()).toDetailString()
+ "\n\nsnapshotFile: " + fsdir.getINode(snapshotFile.toString()).toDetailString();
SnapshotTestHelper.dumpTree(s, cluster);
}
assertEquals(s, -1, readLen);
input.close();
}
}
}
}
/**
   * Appending a specified length to an existing file without closing the file
*/
static class FileAppendNotClose extends FileAppend {
HdfsDataOutputStream out;
FileAppendNotClose(Path file, FileSystem fs, int len) {
super(file, fs, len);
}
@Override
void modify() throws Exception {
assertTrue(fs.exists(file));
byte[] toAppend = new byte[appendLen];
random.nextBytes(toAppend);
out = (HdfsDataOutputStream)fs.append(file);
out.write(toAppend);
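      // hsync with UPDATE_LENGTH makes the appended length visible on the
      // NameNode while the stream stays open; FileAppendClose reuses and
      // closes this same stream later.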
out.hsync(EnumSet.of(HdfsDataOutputStream.SyncFlag.UPDATE_LENGTH));
}
}
/**
   * Appending a specified length to the file opened by a preceding
   * FileAppendNotClose modification, and then closing the file
*/
static class FileAppendClose extends FileAppend {
final FileAppendNotClose fileAppendNotClose;
FileAppendClose(Path file, FileSystem fs, int len,
FileAppendNotClose fileAppendNotClose) {
super(file, fs, len);
this.fileAppendNotClose = fileAppendNotClose;
}
@Override
void modify() throws Exception {
assertTrue(fs.exists(file));
byte[] toAppend = new byte[appendLen];
random.nextBytes(toAppend);
fileAppendNotClose.out.write(toAppend);
fileAppendNotClose.out.close();
}
}
/**
* New file creation
*/
static class FileCreation extends Modification {
final int fileLen;
private final HashMap<Path, FileStatus> fileStatusMap;
FileCreation(Path file, FileSystem fs, int len) {
super(file, fs, "creation");
assert len >= 0;
this.fileLen = len;
fileStatusMap = new HashMap<Path, FileStatus>();
}
@Override
void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file);
if (snapshotFile != null) {
FileStatus status =
fs.exists(snapshotFile) ? fs.getFileStatus(snapshotFile) : null;
fileStatusMap.put(snapshotFile, status);
}
}
}
@Override
void modify() throws Exception {
DFSTestUtil.createFile(fs, file, fileLen,
REPLICATION, seed);
}
@Override
void checkSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
Path snapshotFile = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file);
if (snapshotFile != null) {
boolean computed = fs.exists(snapshotFile);
boolean expected = fileStatusMap.get(snapshotFile) != null;
assertEquals(expected, computed);
if (computed) {
FileStatus currentSnapshotStatus = fs.getFileStatus(snapshotFile);
FileStatus originalStatus = fileStatusMap.get(snapshotFile);
// We compare the string because it contains all the information,
// while FileStatus#equals only compares the path
assertEquals(currentSnapshotStatus.toString(),
originalStatus.toString());
}
}
}
}
}
/**
* File deletion
*/
static class FileDeletion extends Modification {
private final HashMap<Path, Boolean> snapshotFileExistenceMap;
FileDeletion(Path file, FileSystem fs) {
super(file, fs, "deletion");
snapshotFileExistenceMap = new HashMap<Path, Boolean>();
}
@Override
void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
boolean existence = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file) != null;
snapshotFileExistenceMap.put(snapshotRoot, existence);
}
}
@Override
void modify() throws Exception {
fs.delete(file, true);
}
@Override
void checkSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
boolean currentSnapshotFileExist = SnapshotTestHelper.getSnapshotFile(
snapshotRoot, file) != null;
boolean originalSnapshotFileExist = snapshotFileExistenceMap
.get(snapshotRoot);
assertEquals(currentSnapshotFileExist, originalSnapshotFileExist);
}
}
}
/**
* Directory creation or deletion.
*/
class DirCreationOrDeletion extends Modification {
private final TestDirectoryTree.Node node;
private final boolean isCreation;
private final Path changedPath;
private final HashMap<Path, FileStatus> statusMap;
DirCreationOrDeletion(Path file, FileSystem fs, TestDirectoryTree.Node node,
boolean isCreation) {
super(file, fs, "dircreation");
this.node = node;
// If the node's nonSnapshotChildren is empty, we still need to create
// sub-directories
this.isCreation = isCreation || node.nonSnapshotChildren.isEmpty();
if (this.isCreation) {
// Generate the path for the dir to be created
changedPath = new Path(node.nodePath, "sub"
+ node.nonSnapshotChildren.size());
} else {
// If deletion, we delete the current last dir in nonSnapshotChildren
changedPath = node.nonSnapshotChildren.get(node.nonSnapshotChildren
.size() - 1).nodePath;
}
this.statusMap = new HashMap<Path, FileStatus>();
}
@Override
void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
Path snapshotDir = SnapshotTestHelper.getSnapshotFile(snapshotRoot,
changedPath);
if (snapshotDir != null) {
FileStatus status = fs.exists(snapshotDir) ? fs
.getFileStatus(snapshotDir) : null;
statusMap.put(snapshotDir, status);
// In each non-snapshottable directory, we also create a file. Thus
// here we also need to check the file's status before/after taking
// snapshots
Path snapshotFile = new Path(snapshotDir, "file0");
status = fs.exists(snapshotFile) ? fs.getFileStatus(snapshotFile)
: null;
statusMap.put(snapshotFile, status);
}
}
}
@Override
void modify() throws Exception {
if (isCreation) {
// creation
TestDirectoryTree.Node newChild = new TestDirectoryTree.Node(
changedPath, node.level + 1, node, hdfs);
// create file under the new non-snapshottable directory
newChild.initFileList(hdfs, node.nodePath.getName(), BLOCKSIZE,
REPLICATION, seed, 2);
node.nonSnapshotChildren.add(newChild);
} else {
// deletion
TestDirectoryTree.Node childToDelete = node.nonSnapshotChildren
.remove(node.nonSnapshotChildren.size() - 1);
hdfs.delete(childToDelete.nodePath, true);
}
}
@Override
void checkSnapshots() throws Exception {
for (Path snapshot : statusMap.keySet()) {
FileStatus currentStatus = fs.exists(snapshot) ? fs
.getFileStatus(snapshot) : null;
FileStatus originalStatus = statusMap.get(snapshot);
assertEquals(currentStatus, originalStatus);
if (currentStatus != null) {
assertEquals(currentStatus.toString(), originalStatus.toString());
}
}
}
}
/**
   * Directory rename.
*/
class DirRename extends Modification {
private final TestDirectoryTree.Node srcParent;
private final TestDirectoryTree.Node dstParent;
private final Path srcPath;
private final Path dstPath;
private final HashMap<Path, FileStatus> statusMap;
DirRename(Path file, FileSystem fs, TestDirectoryTree.Node src,
TestDirectoryTree.Node dst) throws Exception {
super(file, fs, "dirrename");
this.srcParent = src;
this.dstParent = dst;
dstPath = new Path(dstParent.nodePath, "sub"
+ dstParent.nonSnapshotChildren.size());
// If the srcParent's nonSnapshotChildren is empty, we need to create
// sub-directories
if (srcParent.nonSnapshotChildren.isEmpty()) {
srcPath = new Path(srcParent.nodePath, "sub"
+ srcParent.nonSnapshotChildren.size());
// creation
TestDirectoryTree.Node newChild = new TestDirectoryTree.Node(
srcPath, srcParent.level + 1, srcParent, hdfs);
// create file under the new non-snapshottable directory
newChild.initFileList(hdfs, srcParent.nodePath.getName(), BLOCKSIZE,
REPLICATION, seed, 2);
srcParent.nonSnapshotChildren.add(newChild);
} else {
srcPath = new Path(srcParent.nodePath, "sub"
+ (srcParent.nonSnapshotChildren.size() - 1));
}
this.statusMap = new HashMap<Path, FileStatus>();
}
@Override
void loadSnapshots() throws Exception {
for (Path snapshotRoot : snapshotList) {
Path snapshotDir = SnapshotTestHelper.getSnapshotFile(snapshotRoot,
srcPath);
if (snapshotDir != null) {
FileStatus status = fs.exists(snapshotDir) ? fs
.getFileStatus(snapshotDir) : null;
statusMap.put(snapshotDir, status);
// In each non-snapshottable directory, we also create a file. Thus
// here we also need to check the file's status before/after taking
// snapshots
Path snapshotFile = new Path(snapshotDir, "file0");
status = fs.exists(snapshotFile) ? fs.getFileStatus(snapshotFile)
: null;
statusMap.put(snapshotFile, status);
}
}
}
@Override
void modify() throws Exception {
hdfs.rename(srcPath, dstPath);
TestDirectoryTree.Node newDstChild = new TestDirectoryTree.Node(
dstPath, dstParent.level + 1, dstParent, hdfs);
dstParent.nonSnapshotChildren.add(newDstChild);
}
@Override
void checkSnapshots() throws Exception {
for (Path snapshot : statusMap.keySet()) {
FileStatus currentStatus = fs.exists(snapshot) ? fs
.getFileStatus(snapshot) : null;
FileStatus originalStatus = statusMap.get(snapshot);
assertEquals(currentStatus, originalStatus);
if (currentStatus != null) {
assertEquals(currentStatus.toString(), originalStatus.toString());
}
}
}
}
}
| 39,201
| 35.603175
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestXAttrWithSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.ToolRunner;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Tests interaction of XAttrs with snapshots.
*/
public class TestXAttrWithSnapshot {
private static MiniDFSCluster cluster;
private static Configuration conf;
private static DistributedFileSystem hdfs;
private static int pathCount = 0;
private static Path path, snapshotPath, snapshotPath2, snapshotPath3;
private static String snapshotName, snapshotName2, snapshotName3;
  private static final int SUCCESS = 0;
// XAttrs
private static final String name1 = "user.a1";
private static final byte[] value1 = { 0x31, 0x32, 0x33 };
private static final byte[] newValue1 = { 0x31, 0x31, 0x31 };
private static final String name2 = "user.a2";
private static final byte[] value2 = { 0x37, 0x38, 0x39 };
@Rule
public ExpectedException exception = ExpectedException.none();
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
initCluster(true);
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs);
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() {
++pathCount;
path = new Path("/p" + pathCount);
snapshotName = "snapshot" + pathCount;
snapshotName2 = snapshotName + "-2";
snapshotName3 = snapshotName + "-3";
snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
snapshotPath2 = new Path(path, new Path(".snapshot", snapshotName2));
snapshotPath3 = new Path(path, new Path(".snapshot", snapshotName3));
}
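  // Note: each test works in a fresh /p<N> directory with matching snapshot
  // names, since the MiniDFSCluster is shared across the whole class.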
/**
* Tests modifying xattrs on a directory that has been snapshotted
*/
@Test (timeout = 120000)
public void testModifyReadsCurrentState() throws Exception {
// Init
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
hdfs.setXAttr(path, name1, value1);
hdfs.setXAttr(path, name2, value2);
// Verify that current path reflects xattrs, snapshot doesn't
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 2);
assertArrayEquals(value1, xattrs.get(name1));
assertArrayEquals(value2, xattrs.get(name2));
xattrs = hdfs.getXAttrs(snapshotPath);
assertEquals(xattrs.size(), 0);
// Modify each xattr and make sure it's reflected
hdfs.setXAttr(path, name1, value2, EnumSet.of(XAttrSetFlag.REPLACE));
xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 2);
assertArrayEquals(value2, xattrs.get(name1));
assertArrayEquals(value2, xattrs.get(name2));
hdfs.setXAttr(path, name2, value1, EnumSet.of(XAttrSetFlag.REPLACE));
xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 2);
assertArrayEquals(value2, xattrs.get(name1));
assertArrayEquals(value1, xattrs.get(name2));
// Paranoia checks
xattrs = hdfs.getXAttrs(snapshotPath);
assertEquals(xattrs.size(), 0);
hdfs.removeXAttr(path, name1);
hdfs.removeXAttr(path, name2);
xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 0);
}
/**
* Tests removing xattrs on a directory that has been snapshotted
*/
@Test (timeout = 120000)
public void testRemoveReadsCurrentState() throws Exception {
// Init
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
hdfs.setXAttr(path, name1, value1);
hdfs.setXAttr(path, name2, value2);
// Verify that current path reflects xattrs, snapshot doesn't
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 2);
assertArrayEquals(value1, xattrs.get(name1));
assertArrayEquals(value2, xattrs.get(name2));
xattrs = hdfs.getXAttrs(snapshotPath);
assertEquals(xattrs.size(), 0);
// Remove xattrs and verify one-by-one
hdfs.removeXAttr(path, name2);
xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 1);
assertArrayEquals(value1, xattrs.get(name1));
hdfs.removeXAttr(path, name1);
xattrs = hdfs.getXAttrs(path);
assertEquals(xattrs.size(), 0);
}
/**
   * 1) Set xattrs, then create a snapshot. Assert that the original inode and
   * the snapshot have the same xattrs.
   * 2) Change the original xattrs; assert that the snapshot still has the old
   * xattrs.
*/
@Test
public void testXAttrForSnapshotRootAfterChange() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
hdfs.setXAttr(path, name1, value1);
hdfs.setXAttr(path, name2, value2);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot have same XAttrs.
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
// Original XAttrs have changed, but snapshot still has old XAttrs.
hdfs.setXAttr(path, name1, newValue1);
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(false);
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(true);
doSnapshotRootChangeAssertions(path, snapshotPath);
}
private static void doSnapshotRootChangeAssertions(Path path,
Path snapshotPath) throws Exception {
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
}
/**
   * 1) Set xattrs, then create a snapshot. Assert that the original inode and
   * the snapshot have the same xattrs.
   * 2) Remove the original xattrs; assert that the snapshot still has the old
   * xattrs.
*/
@Test
public void testXAttrForSnapshotRootAfterRemove() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
hdfs.setXAttr(path, name1, value1);
hdfs.setXAttr(path, name2, value2);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot have same XAttrs.
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(xattrs.size(), 2);
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
// Original XAttrs have been removed, but snapshot still has old XAttrs.
hdfs.removeXAttr(path, name1);
hdfs.removeXAttr(path, name2);
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(false);
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(true);
doSnapshotRootRemovalAssertions(path, snapshotPath);
}
private static void doSnapshotRootRemovalAssertions(Path path,
Path snapshotPath) throws Exception {
Map<String, byte[]> xattrs = hdfs.getXAttrs(path);
Assert.assertEquals(0, xattrs.size());
xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(2, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
}
/**
* Test successive snapshots in between modifications of XAttrs.
* Also verify that snapshot XAttrs are not altered when a
* snapshot is deleted.
*/
@Test
public void testSuccessiveSnapshotXAttrChanges() throws Exception {
// First snapshot
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
hdfs.setXAttr(path, name1, value1);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
// Second snapshot
hdfs.setXAttr(path, name1, newValue1);
hdfs.setXAttr(path, name2, value2);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName2);
xattrs = hdfs.getXAttrs(snapshotPath2);
Assert.assertEquals(2, xattrs.size());
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
// Third snapshot
hdfs.setXAttr(path, name1, value1);
hdfs.removeXAttr(path, name2);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName3);
xattrs = hdfs.getXAttrs(snapshotPath3);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
// Check that the first and second snapshots'
// XAttrs have stayed constant
xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
xattrs = hdfs.getXAttrs(snapshotPath2);
Assert.assertEquals(2, xattrs.size());
Assert.assertArrayEquals(newValue1, xattrs.get(name1));
Assert.assertArrayEquals(value2, xattrs.get(name2));
// Remove the second snapshot and verify the first and
// third snapshots' XAttrs have stayed constant
hdfs.deleteSnapshot(path, snapshotName2);
xattrs = hdfs.getXAttrs(snapshotPath);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
xattrs = hdfs.getXAttrs(snapshotPath3);
Assert.assertEquals(1, xattrs.size());
Assert.assertArrayEquals(value1, xattrs.get(name1));
hdfs.deleteSnapshot(path, snapshotName);
hdfs.deleteSnapshot(path, snapshotName3);
}
/**
* Assert exception of setting xattr on read-only snapshot.
*/
@Test
public void testSetXAttrSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.setXAttr(snapshotPath, name1, value1);
}
/**
* Assert exception of removing xattr on read-only snapshot.
*/
@Test
public void testRemoveXAttrSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
hdfs.setXAttr(path, name1, value1);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeXAttr(snapshotPath, name1);
}
/**
* Test that users can copy a snapshot while preserving its xattrs.
*/
@Test (timeout = 120000)
public void testCopySnapshotShouldPreserveXAttrs() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short) 0700));
hdfs.setXAttr(path, name1, value1);
hdfs.setXAttr(path, name2, value2);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
Path snapshotCopy = new Path(path.toString() + "-copy");
String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
snapshotCopy.toUri().toString() };
int ret = ToolRunner.run(new FsShell(conf), argv);
assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);
Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
assertArrayEquals(value1, xattrs.get(name1));
assertArrayEquals(value2, xattrs.get(name2));
}
/**
   * Initialize the cluster, wait for it to become active, and get a
   * FileSystem instance.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @throws Exception if any step fails
*/
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @throws Exception if restart fails
*/
private static void restart(boolean checkpoint) throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false);
}
}
| 14,702
| 36.128788
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestCheckpointsWithSnapshots.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.client.HdfsAdmin;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
import org.junit.Before;
import org.junit.Test;
public class TestCheckpointsWithSnapshots {
private static final Path TEST_PATH = new Path("/foo");
private static final Configuration conf = new HdfsConfiguration();
static {
conf.set(DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
}
@Before
public void setUp() {
FileUtil.fullyDeleteContents(new File(MiniDFSCluster.getBaseDirectory()));
}
/**
* Regression test for HDFS-5433 - "When reloading fsimage during
* checkpointing, we should clear existing snapshottable directories"
*/
@Test
public void testCheckpoint() throws IOException {
MiniDFSCluster cluster = null;
SecondaryNameNode secondary = null;
try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
secondary = new SecondaryNameNode(conf);
SnapshotManager nnSnapshotManager = cluster.getNamesystem().getSnapshotManager();
SnapshotManager secondarySnapshotManager = secondary.getFSNamesystem().getSnapshotManager();
FileSystem fs = cluster.getFileSystem();
HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
assertEquals(0, nnSnapshotManager.getNumSnapshots());
assertEquals(0, nnSnapshotManager.getNumSnapshottableDirs());
assertEquals(0, secondarySnapshotManager.getNumSnapshots());
assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs());
// 1. Create a snapshottable directory foo on the NN.
fs.mkdirs(TEST_PATH);
admin.allowSnapshot(TEST_PATH);
assertEquals(0, nnSnapshotManager.getNumSnapshots());
assertEquals(1, nnSnapshotManager.getNumSnapshottableDirs());
// 2. Create a snapshot of the dir foo. This will be referenced both in
// the SnapshotManager as well as in the file system tree. The snapshot
// count will go up to 1.
Path snapshotPath = fs.createSnapshot(TEST_PATH);
assertEquals(1, nnSnapshotManager.getNumSnapshots());
assertEquals(1, nnSnapshotManager.getNumSnapshottableDirs());
// 3. Start up a 2NN and have it do a checkpoint. It will have foo and its
// snapshot in its list of snapshottable dirs referenced from the
// SnapshotManager, as well as in the file system tree.
secondary.doCheckpoint();
assertEquals(1, secondarySnapshotManager.getNumSnapshots());
assertEquals(1, secondarySnapshotManager.getNumSnapshottableDirs());
// 4. Disallow snapshots on and delete foo on the NN. The snapshot count
// will go down to 0 and the snapshottable dir will be removed from the fs
// tree.
fs.deleteSnapshot(TEST_PATH, snapshotPath.getName());
admin.disallowSnapshot(TEST_PATH);
assertEquals(0, nnSnapshotManager.getNumSnapshots());
assertEquals(0, nnSnapshotManager.getNumSnapshottableDirs());
// 5. Have the NN do a saveNamespace, writing out a new fsimage with
// snapshot count 0.
NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
NameNodeAdapter.saveNamespace(cluster.getNameNode());
NameNodeAdapter.leaveSafeMode(cluster.getNameNode());
// 6. Have the still-running 2NN do a checkpoint. It will notice that the
// fsimage has changed on the NN and redownload/reload from that image.
// This will replace all INodes in the file system tree as well as reset
// the snapshot counter to 0 in the SnapshotManager. However, it will not
// clear the list of snapshottable dirs referenced from the
// SnapshotManager. When it writes out an fsimage, the 2NN will write out
// 0 for the snapshot count, but still serialize the snapshottable dir
// referenced in the SnapshotManager even though it no longer appears in
// the file system tree. The NN will not be able to start up with this.
secondary.doCheckpoint();
assertEquals(0, secondarySnapshotManager.getNumSnapshots());
assertEquals(0, secondarySnapshotManager.getNumSnapshottableDirs());
} finally {
if (cluster != null) {
cluster.shutdown();
}
if (secondary != null) {
secondary.shutdown();
}
}
}
}
| 5,675
| 42
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotException;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Test for renaming snapshot
*/
public class TestSnapshotRename {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
private final Path sub1 = new Path(dir, "sub1");
private final Path file1 = new Path(sub1, "file1");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
FSDirectory fsdir;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
fsdir = fsn.getFSDirectory();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Rule
public ExpectedException exception = ExpectedException.none();
/**
* Check the correctness of snapshot list within snapshottable dir
*/
private void checkSnapshotList(INodeDirectory srcRoot,
String[] sortedNames, String[] names) {
assertTrue(srcRoot.isSnapshottable());
ReadOnlyList<Snapshot> listByName = srcRoot
.getDirectorySnapshottableFeature().getSnapshotList();
assertEquals(sortedNames.length, listByName.size());
for (int i = 0; i < listByName.size(); i++) {
assertEquals(sortedNames[i], listByName.get(i).getRoot().getLocalName());
}
List<DirectoryDiff> listByTime = srcRoot.getDiffs().asList();
assertEquals(names.length, listByTime.size());
for (int i = 0; i < listByTime.size(); i++) {
Snapshot s = srcRoot.getDirectorySnapshottableFeature().getSnapshotById(
listByTime.get(i).getSnapshotId());
assertEquals(names[i], s.getRoot().getLocalName());
}
}
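  // Note: the by-name list stays sorted lexicographically, while the
  // DirectoryDiff list preserves snapshot creation order, so a rename
  // reorders the former but not the latter (see testSnapshotList below).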
/**
* Rename snapshot(s), and check the correctness of the snapshot list within
   * {@link DirectorySnapshottableFeature}
*/
@Test (timeout=60000)
public void testSnapshotList() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create three snapshots for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s3");
// Rename s3 to s22
hdfs.renameSnapshot(sub1, "s3", "s22");
// Check the snapshots list
INodeDirectory srcRoot = fsdir.getINode(sub1.toString()).asDirectory();
checkSnapshotList(srcRoot, new String[] { "s1", "s2", "s22" },
new String[] { "s1", "s2", "s22" });
// Rename s1 to s4
hdfs.renameSnapshot(sub1, "s1", "s4");
checkSnapshotList(srcRoot, new String[] { "s2", "s22", "s4" },
new String[] { "s4", "s2", "s22" });
// Rename s22 to s0
hdfs.renameSnapshot(sub1, "s22", "s0");
checkSnapshotList(srcRoot, new String[] { "s0", "s2", "s4" },
new String[] { "s4", "s2", "s0" });
}
/**
* Test FileStatus of snapshot file before/after rename
*/
@Test (timeout=60000)
public void testSnapshotRename() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshot for sub1
Path snapshotRoot = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
Path ssPath = new Path(snapshotRoot, file1.getName());
assertTrue(hdfs.exists(ssPath));
FileStatus statusBeforeRename = hdfs.getFileStatus(ssPath);
// Rename the snapshot
hdfs.renameSnapshot(sub1, "s1", "s2");
// <sub1>/.snapshot/s1/file1 should no longer exist
assertFalse(hdfs.exists(ssPath));
snapshotRoot = SnapshotTestHelper.getSnapshotRoot(sub1, "s2");
ssPath = new Path(snapshotRoot, file1.getName());
// Instead, <sub1>/.snapshot/s2/file1 should exist
assertTrue(hdfs.exists(ssPath));
FileStatus statusAfterRename = hdfs.getFileStatus(ssPath);
// FileStatus of the snapshot should not change except the path
assertFalse(statusBeforeRename.equals(statusAfterRename));
statusBeforeRename.setPath(statusAfterRename.getPath());
assertEquals(statusBeforeRename.toString(), statusAfterRename.toString());
}
/**
* Test rename a non-existing snapshot
*/
@Test (timeout=60000)
public void testRenameNonExistingSnapshot() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshot for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
exception.expect(SnapshotException.class);
String error = "The snapshot wrongName does not exist for directory "
+ sub1.toString();
exception.expectMessage(error);
hdfs.renameSnapshot(sub1, "wrongName", "s2");
}
/**
* Test rename a snapshot to another existing snapshot
*/
@Test (timeout=60000)
public void testRenameToExistingSnapshot() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshots for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s2");
exception.expect(SnapshotException.class);
String error = "The snapshot s2 already exists for directory "
+ sub1.toString();
exception.expectMessage(error);
hdfs.renameSnapshot(sub1, "s1", "s2");
}
/**
* Test renaming a snapshot with illegal name
*/
@Test
public void testRenameWithIllegalName() throws Exception {
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Create snapshots for sub1
SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR;
try {
hdfs.renameSnapshot(sub1, "s1", name1);
fail("Exception expected when an illegal name is given for rename");
} catch (RemoteException e) {
String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR
+ "\" is a reserved name.";
GenericTestUtils.assertExceptionContains(errorMsg, e);
}
String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\"";
final String[] badNames = new String[] { "foo" + Path.SEPARATOR,
Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" };
for (String badName : badNames) {
try {
hdfs.renameSnapshot(sub1, "s1", badName);
fail("Exception expected when an illegal name is given");
} catch (RemoteException e) {
GenericTestUtils.assertExceptionContains(errorMsg, e);
}
}
}
@Test
public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream psOut = new PrintStream(out);
System.setOut(psOut);
System.setErr(psOut);
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
int val = shell.run(argv1);
    assertEquals(-1, val);
assertTrue(out.toString().contains(
argv1[0] + ": Incorrect number of arguments."));
out.reset();
String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
val = shell.run(argv2);
    assertEquals(-1, val);
assertTrue(out.toString().contains(
argv2[0] + ": Incorrect number of arguments."));
psOut.close();
out.close();
}
}
| 9,507
| 35.429119
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileWithSnapshotFeature.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import com.google.common.collect.Lists;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.util.ArrayList;
import static org.apache.hadoop.fs.StorageType.DISK;
import static org.apache.hadoop.fs.StorageType.SSD;
import static org.mockito.Mockito.anyByte;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestFileWithSnapshotFeature {
private static final int BLOCK_SIZE = 1024;
private static final short REPL_3 = 3;
private static final short REPL_1 = 1;
@Test
public void testUpdateQuotaAndCollectBlocks() {
FileDiffList diffs = new FileDiffList();
FileWithSnapshotFeature sf = new FileWithSnapshotFeature(diffs);
FileDiff diff = mock(FileDiff.class);
BlockStoragePolicySuite bsps = mock(BlockStoragePolicySuite.class);
BlockStoragePolicy bsp = mock(BlockStoragePolicy.class);
BlockInfo[] blocks = new BlockInfo[] {
new BlockInfoContiguous(new Block(1, BLOCK_SIZE, 1), REPL_1)
};
// No snapshot
INodeFile file = mock(INodeFile.class);
when(file.getFileWithSnapshotFeature()).thenReturn(sf);
when(file.getBlocks()).thenReturn(blocks);
when(file.getStoragePolicyID()).thenReturn((byte) 1);
when(bsps.getPolicy(anyByte())).thenReturn(bsp);
INode.BlocksMapUpdateInfo collectedBlocks = mock(
INode.BlocksMapUpdateInfo.class);
ArrayList<INode> removedINodes = new ArrayList<>();
INode.ReclaimContext ctx = new INode.ReclaimContext(
bsps, collectedBlocks, removedINodes, null);
sf.updateQuotaAndCollectBlocks(ctx, file, diff);
QuotaCounts counts = ctx.quotaDelta().getCountsCopy();
Assert.assertEquals(0, counts.getStorageSpace());
Assert.assertTrue(counts.getTypeSpaces().allLessOrEqual(0));
// INode only exists in the snapshot
INodeFile snapshotINode = mock(INodeFile.class);
when(file.getPreferredBlockReplication()).thenReturn(REPL_1);
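    // In this HDFS version INodeFile packs replication into its 64-bit
    // header starting at bit 48, so REPL_3 << 48 fakes a snapshot copy
    // with replication 3.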
Whitebox.setInternalState(snapshotINode, "header", (long) REPL_3 << 48);
Whitebox.setInternalState(diff, "snapshotINode", snapshotINode);
when(diff.getSnapshotINode()).thenReturn(snapshotINode);
when(bsp.chooseStorageTypes(REPL_1))
.thenReturn(Lists.newArrayList(SSD));
when(bsp.chooseStorageTypes(REPL_3))
.thenReturn(Lists.newArrayList(DISK));
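    // Expected delta between the snapshot copy (replication 3, policy DISK)
    // and the current file (replication 1, policy SSD):
    // (REPL_3 - REPL_1) * BLOCK_SIZE of raw storage space, plus BLOCK_SIZE
    // on DISK and minus BLOCK_SIZE on SSD, as asserted below.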
sf.updateQuotaAndCollectBlocks(ctx, file, diff);
counts = ctx.quotaDelta().getCountsCopy();
Assert.assertEquals((REPL_3 - REPL_1) * BLOCK_SIZE,
counts.getStorageSpace());
Assert.assertEquals(BLOCK_SIZE, counts.getTypeSpaces().get(DISK));
Assert.assertEquals(-BLOCK_SIZE, counts.getTypeSpaces().get(SSD));
}
}
| 4,069
| 42.763441
| 77
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.FileWriter;
import java.io.IOException;
import java.io.PrintWriter;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceStorage;
import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DirectoryScanner;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.ipc.ProtobufRpcEngine.Server;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
/**
* Helper for writing snapshot related tests
*/
public class SnapshotTestHelper {
public static final Log LOG = LogFactory.getLog(SnapshotTestHelper.class);
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
final String[] lognames = {
"org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
"org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
"org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
};
for(String n : lognames) {
GenericTestUtils.disableLog(LogFactory.getLog(n));
}
GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));
GenericTestUtils.disableLog(BlockScanner.LOG);
GenericTestUtils.disableLog(HttpServer2.LOG);
GenericTestUtils.disableLog(DataNode.LOG);
GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
GenericTestUtils.disableLog(LeaseManager.LOG);
GenericTestUtils.disableLog(NameNode.stateChangeLog);
GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
GenericTestUtils.disableLog(DFSClient.LOG);
GenericTestUtils.disableLog(Server.LOG);
}
private SnapshotTestHelper() {
    // Cannot be instantiated
}
public static Path getSnapshotRoot(Path snapshottedDir, String snapshotName) {
return new Path(snapshottedDir, HdfsConstants.DOT_SNAPSHOT_DIR + "/"
+ snapshotName);
}
public static Path getSnapshotPath(Path snapshottedDir, String snapshotName,
String fileLocalName) {
return new Path(getSnapshotRoot(snapshottedDir, snapshotName),
fileLocalName);
}
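  // Example (hypothetical names): for a snapshottable directory /foo and a
  // snapshot named "s1",
  //   getSnapshotRoot(new Path("/foo"), "s1")        -> /foo/.snapshot/s1
  //   getSnapshotPath(new Path("/foo"), "s1", "bar") -> /foo/.snapshot/s1/bar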
/**
* Create snapshot for a dir using a given snapshot name
*
* @param hdfs DistributedFileSystem instance
* @param snapshotRoot The dir to be snapshotted
* @param snapshotName The name of the snapshot
* @return The path of the snapshot root
*/
public static Path createSnapshot(DistributedFileSystem hdfs,
Path snapshotRoot, String snapshotName) throws Exception {
LOG.info("createSnapshot " + snapshotName + " for " + snapshotRoot);
assertTrue(hdfs.exists(snapshotRoot));
hdfs.allowSnapshot(snapshotRoot);
hdfs.createSnapshot(snapshotRoot, snapshotName);
// set quota to a large value for testing counts
hdfs.setQuota(snapshotRoot, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
return SnapshotTestHelper.getSnapshotRoot(snapshotRoot, snapshotName);
}
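  // Usage sketch (hypothetical paths): after
  //   Path s1 = SnapshotTestHelper.createSnapshot(hdfs, new Path("/demo"), "s1");
  // the returned root is /demo/.snapshot/s1, and files captured by the
  // snapshot are read through paths like new Path(s1, "sub/file").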
/**
* Check the functionality of a snapshot.
*
* @param hdfs DistributedFileSystem instance
* @param snapshotRoot The root of the snapshot
* @param snapshottedDir The snapshotted directory
*/
public static void checkSnapshotCreation(DistributedFileSystem hdfs,
Path snapshotRoot, Path snapshottedDir) throws Exception {
// Currently we only check if the snapshot was created successfully
assertTrue(hdfs.exists(snapshotRoot));
// Compare the snapshot with the current dir
FileStatus[] currentFiles = hdfs.listStatus(snapshottedDir);
FileStatus[] snapshotFiles = hdfs.listStatus(snapshotRoot);
assertEquals("snapshottedDir=" + snapshottedDir
+ ", snapshotRoot=" + snapshotRoot,
currentFiles.length, snapshotFiles.length);
}
/**
* Compare two dumped trees that are stored in two files. The following is an
* example of the dumped tree:
*
* <pre>
* information of root
* +- the first child of root (e.g., /foo)
* +- the first child of /foo
* ...
* \- the last child of /foo (e.g., /foo/bar)
* +- the first child of /foo/bar
* ...
* snapshots of /foo
* +- snapshot s_1
* ...
* \- snapshot s_n
* +- second child of root
* ...
* \- last child of root
*
* The following information is dumped for each inode:
* localName (className@hashCode) parent permission group user
*
* Specific information for different types of INode:
* {@link INodeDirectory}:childrenSize
* {@link INodeFile}: fileSize, block list. Check {@link BlockInfo#toString()}
* and {@link BlockInfoContiguousUnderConstruction#toString()} for detailed information.
* {@link FileWithSnapshot}: next link
* </pre>
* @see INode#dumpTreeRecursively()
*/
public static void compareDumpedTreeInFile(File file1, File file2,
boolean compareQuota) throws IOException {
try {
compareDumpedTreeInFile(file1, file2, compareQuota, false);
} catch(Throwable t) {
LOG.info("FAILED compareDumpedTreeInFile(" + file1 + ", " + file2 + ")", t);
compareDumpedTreeInFile(file1, file2, compareQuota, true);
}
}
private static void compareDumpedTreeInFile(File file1, File file2,
boolean compareQuota, boolean print) throws IOException {
if (print) {
printFile(file1);
printFile(file2);
}
BufferedReader reader1 = new BufferedReader(new FileReader(file1));
BufferedReader reader2 = new BufferedReader(new FileReader(file2));
try {
String line1 = "";
String line2 = "";
while ((line1 = reader1.readLine()) != null
&& (line2 = reader2.readLine()) != null) {
if (print) {
System.out.println();
System.out.println("1) " + line1);
System.out.println("2) " + line2);
}
// skip the hashCode part of the object string during the comparison,
// also ignore the difference between INodeFile/INodeFileWithSnapshot
line1 = line1.replaceAll("INodeFileWithSnapshot", "INodeFile");
line2 = line2.replaceAll("INodeFileWithSnapshot", "INodeFile");
line1 = line1.replaceAll("@[\\dabcdef]+", "");
line2 = line2.replaceAll("@[\\dabcdef]+", "");
// skip the replica field of the last block of an
// INodeFileUnderConstruction
line1 = line1.replaceAll("replicas=\\[.*\\]", "replicas=[]");
line2 = line2.replaceAll("replicas=\\[.*\\]", "replicas=[]");
if (!compareQuota) {
line1 = line1.replaceAll("Quota\\[.*\\]", "Quota[]");
line2 = line2.replaceAll("Quota\\[.*\\]", "Quota[]");
}
// skip the specific fields of BlockInfoUnderConstruction when the node
// is an INodeFileSnapshot or an INodeFileUnderConstructionSnapshot
if (line1.contains("(INodeFileSnapshot)")
|| line1.contains("(INodeFileUnderConstructionSnapshot)")) {
line1 = line1.replaceAll(
"\\{blockUCState=\\w+, primaryNodeIndex=[-\\d]+, replicas=\\[\\]\\}",
"");
line2 = line2.replaceAll(
"\\{blockUCState=\\w+, primaryNodeIndex=[-\\d]+, replicas=\\[\\]\\}",
"");
}
assertEquals(line1, line2);
}
Assert.assertNull(reader1.readLine());
Assert.assertNull(reader2.readLine());
} finally {
reader1.close();
reader2.close();
}
}
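  // Illustration of the normalization above (hypothetical sample line): a
  // dumped entry such as
  //   foo (INodeFileWithSnapshot@1a2b3c) ... replicas=[dn1, dn2]
  // is canonicalized before comparison to
  //   foo (INodeFile) ... replicas=[]
  // so that identity hash codes, replica placement, and (when compareQuota is
  // false) quota values, which may legitimately differ between two dumps,
  // cannot cause spurious mismatches.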
static void printFile(File f) throws IOException {
System.out.println();
System.out.println("File: " + f);
BufferedReader in = new BufferedReader(new FileReader(f));
try {
for(String line; (line = in.readLine()) != null; ) {
System.out.println(line);
}
} finally {
in.close();
}
}
public static void dumpTree2File(FSDirectory fsdir, File f) throws IOException{
final PrintWriter out = new PrintWriter(new FileWriter(f, false), true);
fsdir.getINode("/").dumpTreeRecursively(out, new StringBuilder(),
Snapshot.CURRENT_STATE_ID);
out.close();
}
/**
* Generate the path for a snapshot file.
*
* @param snapshotRoot of format
   * {@literal <snapshottable_dir>/.snapshot/<snapshot_name>}
* @param file path to a file
* @return The path of the snapshot of the file assuming the file has a
* snapshot under the snapshot root of format
   *         {@literal <snapshottable_dir>/.snapshot/<snapshot_name>/<path_to_file_inside_snapshot>}
* . Null if the file is not under the directory associated with the
* snapshot root.
*/
static Path getSnapshotFile(Path snapshotRoot, Path file) {
Path rootParent = snapshotRoot.getParent();
if (rootParent != null && rootParent.getName().equals(".snapshot")) {
Path snapshotDir = rootParent.getParent();
if (file.toString().contains(snapshotDir.toString())
&& !file.equals(snapshotDir)) {
String fileName = file.toString().substring(
snapshotDir.toString().length() + 1);
Path snapshotFile = new Path(snapshotRoot, fileName);
return snapshotFile;
}
}
return null;
}
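  // Example (hypothetical paths): with snapshotRoot = /foo/.snapshot/s1 and
  // file = /foo/bar/baz, this returns /foo/.snapshot/s1/bar/baz; with
  // file = /other/baz, which lies outside /foo, it returns null.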
/**
 * A class creating directory trees for snapshot testing. For simplicity,
 * the directory tree is a binary tree, i.e., each directory has two
 * snapshottable child directories.
*/
static class TestDirectoryTree {
/** Height of the directory tree */
final int height;
/** Top node of the directory tree */
final Node topNode;
/** A map recording nodes for each tree level */
final Map<Integer, ArrayList<Node>> levelMap;
/**
* Constructor to build a tree of given {@code height}
*/
TestDirectoryTree(int height, FileSystem fs) throws Exception {
this.height = height;
this.topNode = new Node(new Path("/TestSnapshot"), 0,
null, fs);
this.levelMap = new HashMap<Integer, ArrayList<Node>>();
addDirNode(topNode, 0);
genChildren(topNode, height - 1, fs);
}
/**
* Add a node into the levelMap
*/
private void addDirNode(Node node, int atLevel) {
ArrayList<Node> list = levelMap.get(atLevel);
if (list == null) {
list = new ArrayList<Node>();
levelMap.put(atLevel, list);
}
list.add(node);
}
int id = 0;
/**
* Recursively generate the tree based on the height.
*
* @param parent The parent node
* @param level The remaining levels to generate
     * @param fs The FileSystem in which to generate the files/dirs
* @throws Exception
*/
private void genChildren(Node parent, int level, FileSystem fs)
throws Exception {
if (level == 0) {
return;
}
parent.leftChild = new Node(new Path(parent.nodePath,
"left" + ++id), height - level, parent, fs);
parent.rightChild = new Node(new Path(parent.nodePath,
"right" + ++id), height - level, parent, fs);
addDirNode(parent.leftChild, parent.leftChild.level);
addDirNode(parent.rightChild, parent.rightChild.level);
genChildren(parent.leftChild, level - 1, fs);
genChildren(parent.rightChild, level - 1, fs);
}
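    // Shape sketch for height = 3 (child names interleave because both
    // subtrees share the "id" counter):
    //   /TestSnapshot                                    level 0
    //   +- left1                  \- right2              level 1
    //      +- left3 \- right4        +- left5 \- right6  level 2
    // levelMap then holds 0 -> [root], 1 -> [left1, right2],
    // 2 -> [left3, right4, left5, right6].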
/**
* Randomly retrieve a node from the directory tree.
*
* @param random A random instance passed by user.
* @param excludedList Excluded list, i.e., the randomly generated node
* cannot be one of the nodes in this list.
* @return a random node from the tree.
*/
Node getRandomDirNode(Random random, List<Node> excludedList) {
while (true) {
int level = random.nextInt(height);
ArrayList<Node> levelList = levelMap.get(level);
int index = random.nextInt(levelList.size());
Node randomNode = levelList.get(index);
if (excludedList == null || !excludedList.contains(randomNode)) {
return randomNode;
}
}
}
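    // Usage sketch (hypothetical caller): draw two distinct directories, e.g.
    // a source and a destination for a rename test:
    //   Node src = tree.getRandomDirNode(random, null);
    //   Node dst = tree.getRandomDirNode(random, Arrays.asList(src));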
/**
* The class representing a node in {@link TestDirectoryTree}.
* <br>
* This contains:
* <ul>
* <li>Two children representing the two snapshottable directories</li>
* <li>A list of files for testing, so that we can check snapshots
* after file creation/deletion/modification.</li>
* <li>A list of non-snapshottable directories, to test snapshots with
* directory creation/deletion. Note that this is needed because the
   * deletion of a snapshottable directory with snapshots is not allowed.</li>
* </ul>
*/
static class Node {
/** The level of this node in the directory tree */
final int level;
/** Children */
Node leftChild;
Node rightChild;
/** Parent node of the node */
final Node parent;
/** File path of the node */
final Path nodePath;
/**
* The file path list for testing snapshots before/after file
* creation/deletion/modification
*/
ArrayList<Path> fileList;
      /**
       * When testing snapshots with file creation, we do not want to insert
       * new entries into the fileList; instead we re-create the file that was
       * deleted in the previous round. This field records the index of that
       * deleted file in the fileList, so file modifications roll forward
       * through the list.
       */
int nullFileIndex = 0;
/**
* A list of non-snapshottable directories for testing snapshots with
* directory creation/deletion
*/
final ArrayList<Node> nonSnapshotChildren;
Node(Path path, int level, Node parent,
FileSystem fs) throws Exception {
this.nodePath = path;
this.level = level;
this.parent = parent;
this.nonSnapshotChildren = new ArrayList<Node>();
fs.mkdirs(nodePath);
}
/**
       * Create files and add them to the fileList. The last entry is added to
       * the list but not created on disk; it marks where the next round of
       * file creation starts.
*/
void initFileList(FileSystem fs, String namePrefix, long fileLen,
short replication, long seed, int numFiles) throws Exception {
fileList = new ArrayList<Path>(numFiles);
for (int i = 0; i < numFiles; i++) {
Path file = new Path(nodePath, namePrefix + "-f" + i);
fileList.add(file);
if (i < numFiles - 1) {
DFSTestUtil.createFile(fs, file, fileLen, replication, seed);
}
}
nullFileIndex = numFiles - 1;
}
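      // Example (numFiles = 3, namePrefix "x"): x-f0 and x-f1 are created on
      // disk, x-f2 is added to fileList only as a placeholder, and
      // nullFileIndex starts at 2, so the next file-creation round creates
      // x-f2 while the membership of fileList stays stable.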
@Override
public boolean equals(Object o) {
      if (o instanceof Node) {  // instanceof already rejects null
Node node = (Node) o;
return node.nodePath.equals(nodePath);
}
return false;
}
@Override
public int hashCode() {
return nodePath.hashCode();
}
}
}
public static void dumpTree(String message, MiniDFSCluster cluster
) throws UnresolvedLinkException {
System.out.println("XXX " + message);
cluster.getNameNode().getNamesystem().getFSDirectory().getINode("/"
).dumpTreeRecursively(System.out);
}
}
| 17,783
| 36.283019
| 99
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestUpdatePipelineWithSnapshots.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.io.IOUtils;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import org.junit.Test;
public class TestUpdatePipelineWithSnapshots {
// Regression test for HDFS-6647.
@Test
public void testUpdatePipelineAfterDelete() throws Exception {
Configuration conf = new HdfsConfiguration();
Path file = new Path("/test-file");
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
try {
FileSystem fs = cluster.getFileSystem();
NamenodeProtocols namenode = cluster.getNameNodeRpc();
DFSOutputStream out = null;
try {
// Create a file and make sure a block is allocated for it.
out = (DFSOutputStream)(fs.create(file).
getWrappedStream());
out.write(1);
out.hflush();
// Create a snapshot that includes the file.
SnapshotTestHelper.createSnapshot((DistributedFileSystem) fs,
new Path("/"), "s1");
// Grab the block info of this file for later use.
FSDataInputStream in = null;
ExtendedBlock oldBlock = null;
try {
in = fs.open(file);
oldBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
} finally {
IOUtils.closeStream(in);
}
// Allocate a new block ID/gen stamp so we can simulate pipeline
// recovery.
String clientName = ((DistributedFileSystem)fs).getClient()
.getClientName();
LocatedBlock newLocatedBlock = namenode.updateBlockForPipeline(
oldBlock, clientName);
ExtendedBlock newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(),
oldBlock.getBlockId(), oldBlock.getNumBytes(),
newLocatedBlock.getBlock().getGenerationStamp());
        // Delete the file from the present FS. It will still exist in the
        // previously-created snapshot. This will log an OP_DELETE for the
// file in question.
fs.delete(file, true);
// Simulate a pipeline recovery, wherein a new block is allocated
// for the existing block, resulting in an OP_UPDATE_BLOCKS being
// logged for the file in question.
try {
namenode.updatePipeline(clientName, oldBlock, newBlock,
newLocatedBlock.getLocations(), newLocatedBlock.getStorageIDs());
} catch (IOException ioe) {
          // expected
assertExceptionContains(
"does not exist or it is not under construction", ioe);
}
// Make sure the NN can restart with the edit logs as we have them now.
cluster.restartNameNode(true);
} finally {
IOUtils.closeStream(out);
}
} finally {
cluster.shutdown();
}
}
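  // Sequence exercised above, in summary: allocate a block, snapshot the
  // file, record the old block, bump its generation stamp via
  // updateBlockForPipeline, delete the file (logging OP_DELETE), attempt
  // updatePipeline on the now-deleted file (expected to fail), then restart
  // the NameNode to prove the resulting edit log still replays cleanly.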
}
| 4,277
| 37.890909
| 79
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestAclWithSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.*;
import static org.apache.hadoop.fs.permission.AclEntryScope.*;
import static org.apache.hadoop.fs.permission.AclEntryType.*;
import static org.apache.hadoop.fs.permission.FsAction.*;
import static org.junit.Assert.*;
import java.io.IOException;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException;
import org.apache.hadoop.hdfs.server.namenode.AclFeature;
import org.apache.hadoop.hdfs.server.namenode.AclStorage;
import org.apache.hadoop.hdfs.server.namenode.AclTestHelpers;
import org.apache.hadoop.hdfs.server.namenode.FSAclBaseTest;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
import com.google.common.collect.Lists;
/**
* Tests interaction of ACLs with snapshots.
*/
public class TestAclWithSnapshot {
private static final UserGroupInformation BRUCE =
UserGroupInformation.createUserForTesting("bruce", new String[] { });
private static final UserGroupInformation DIANA =
UserGroupInformation.createUserForTesting("diana", new String[] { });
private static MiniDFSCluster cluster;
private static Configuration conf;
private static FileSystem fsAsBruce, fsAsDiana;
private static DistributedFileSystem hdfs;
private static int pathCount = 0;
private static Path path, snapshotPath;
private static String snapshotName;
@Rule
public ExpectedException exception = ExpectedException.none();
@BeforeClass
public static void init() throws Exception {
conf = new Configuration();
conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
initCluster(true);
}
@AfterClass
public static void shutdown() throws Exception {
IOUtils.cleanup(null, hdfs, fsAsBruce, fsAsDiana);
if (cluster != null) {
cluster.shutdown();
}
}
@Before
public void setUp() {
++pathCount;
path = new Path("/p" + pathCount);
snapshotName = "snapshot" + pathCount;
snapshotPath = new Path(path, new Path(".snapshot", snapshotName));
}
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterChange()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010750, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010750, snapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
// Original has changed, but snapshot still has old ACL.
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(false);
doSnapshotRootChangeAssertions(path, snapshotPath);
restart(true);
doSnapshotRootChangeAssertions(path, snapshotPath);
}
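  // The assertions above run three times by design: against live state, after
  // a restart that replays the edit log (restart(false)), and after a restart
  // from a freshly saved checkpoint (restart(true)), verifying that the
  // snapshot's frozen ACL survives both persistence paths. The sibling tests
  // below follow the same pattern.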
private static void doSnapshotRootChangeAssertions(Path path,
Path snapshotPath) throws Exception {
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010550, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010750, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionGranted(fsAsDiana, DIANA, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterChange()
throws Exception {
Path filePath = new Path(path, "file1");
Path subdirPath = new Path(path, "subdir1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
(short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, subdirPath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
// Original has changed, but snapshot still has old ACL.
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(false);
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(true);
doSnapshotContentsChangeAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
}
private static void doSnapshotContentsChangeAssertions(Path filePath,
Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
throws Exception {
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "diana", ALL),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010570, filePath);
assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
assertFilePermissionGranted(fsAsDiana, DIANA, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010570, subdirPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionGranted(fsAsDiana, DIANA, subdirPath);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotRootAfterRemoval()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, ALL),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(path, aclSpec);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010750, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010750, snapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
hdfs.removeAcl(path);
// Original has changed, but snapshot still has old ACL.
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(false);
doSnapshotRootRemovalAssertions(path, snapshotPath);
restart(true);
doSnapshotRootRemovalAssertions(path, snapshotPath);
}
private static void doSnapshotRootRemovalAssertions(Path path,
Path snapshotPath) throws Exception {
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
assertPermission((short)0700, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010750, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, snapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, snapshotPath);
}
@Test
public void testOriginalAclEnforcedForSnapshotContentsAfterRemoval()
throws Exception {
Path filePath = new Path(path, "file1");
Path subdirPath = new Path(path, "subdir1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
Path subdirSnapshotPath = new Path(snapshotPath, "subdir1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0777));
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
FileSystem.mkdirs(hdfs, subdirPath, FsPermission.createImmutable(
(short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, READ_EXECUTE),
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE),
aclEntry(ACCESS, OTHER, NONE));
hdfs.setAcl(filePath, aclSpec);
hdfs.setAcl(subdirPath, aclSpec);
assertFilePermissionGranted(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
// Both original and snapshot still have same ACL.
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, subdirPath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
hdfs.removeAcl(filePath);
hdfs.removeAcl(subdirPath);
// Original has changed, but snapshot still has old ACL.
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(false);
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
restart(true);
doSnapshotContentsRemovalAssertions(filePath, fileSnapshotPath, subdirPath,
subdirSnapshotPath);
}
private static void doSnapshotContentsRemovalAssertions(Path filePath,
Path fileSnapshotPath, Path subdirPath, Path subdirSnapshotPath)
throws Exception {
AclEntry[] expected = new AclEntry[] { };
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0500, filePath);
assertFilePermissionDenied(fsAsBruce, BRUCE, filePath);
assertFilePermissionDenied(fsAsDiana, DIANA, filePath);
s = hdfs.getAclStatus(subdirPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0500, subdirPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, subdirPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirPath);
expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, fileSnapshotPath);
assertFilePermissionGranted(fsAsBruce, BRUCE, fileSnapshotPath);
assertFilePermissionDenied(fsAsDiana, DIANA, fileSnapshotPath);
s = hdfs.getAclStatus(subdirSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010550, subdirSnapshotPath);
assertDirPermissionGranted(fsAsBruce, BRUCE, subdirSnapshotPath);
assertDirPermissionDenied(fsAsDiana, DIANA, subdirSnapshotPath);
}
@Test
public void testModifyReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
hdfs.modifyAclEntries(path, aclSpec);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "diana", READ_EXECUTE));
hdfs.modifyAclEntries(path, aclSpec);
AclEntry[] expected = new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", ALL),
aclEntry(ACCESS, USER, "diana", READ_EXECUTE),
aclEntry(ACCESS, GROUP, NONE) };
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)010770, path);
assertDirPermissionGranted(fsAsBruce, BRUCE, path);
assertDirPermissionGranted(fsAsDiana, DIANA, path);
}
@Test
public void testRemoveReadsCurrentState() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", ALL));
hdfs.modifyAclEntries(path, aclSpec);
hdfs.removeAcl(path);
AclEntry[] expected = new AclEntry[] { };
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(expected, returned);
assertPermission((short)0700, path);
assertDirPermissionDenied(fsAsBruce, BRUCE, path);
assertDirPermissionDenied(fsAsDiana, DIANA, path);
}
@Test
public void testDefaultAclNotCopiedToAccessAclOfNewSnapshot()
throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
hdfs.modifyAclEntries(path, aclSpec);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
AclStatus s = hdfs.getAclStatus(path);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010700, path);
s = hdfs.getAclStatus(snapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(DEFAULT, USER, ALL),
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE),
aclEntry(DEFAULT, GROUP, NONE),
aclEntry(DEFAULT, MASK, READ_EXECUTE),
aclEntry(DEFAULT, OTHER, NONE) }, returned);
assertPermission((short)010700, snapshotPath);
assertDirPermissionDenied(fsAsBruce, BRUCE, snapshotPath);
}
@Test
public void testModifyAclEntriesSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce", READ_EXECUTE));
exception.expect(SnapshotAccessControlException.class);
hdfs.modifyAclEntries(snapshotPath, aclSpec);
}
@Test
public void testRemoveAclEntriesSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce"));
exception.expect(SnapshotAccessControlException.class);
hdfs.removeAclEntries(snapshotPath, aclSpec);
}
@Test
public void testRemoveDefaultAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeDefaultAcl(snapshotPath);
}
@Test
public void testRemoveAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
exception.expect(SnapshotAccessControlException.class);
hdfs.removeAcl(snapshotPath);
}
@Test
public void testSetAclSnapshotPath() throws Exception {
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0700));
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(DEFAULT, USER, "bruce"));
exception.expect(SnapshotAccessControlException.class);
hdfs.setAcl(snapshotPath, aclSpec);
}
@Test
public void testChangeAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)010660, fileSnapshotPath);
aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ));
hdfs.modifyAclEntries(filePath, aclSpec);
}
@Test
public void testRemoveAclExceedsQuota() throws Exception {
Path filePath = new Path(path, "file1");
Path fileSnapshotPath = new Path(snapshotPath, "file1");
FileSystem.mkdirs(hdfs, path, FsPermission.createImmutable((short)0755));
hdfs.allowSnapshot(path);
hdfs.setQuota(path, 3, HdfsConstants.QUOTA_DONT_SET);
FileSystem.create(hdfs, filePath, FsPermission.createImmutable((short)0600))
.close();
hdfs.setPermission(filePath, FsPermission.createImmutable((short)0600));
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "bruce", READ_WRITE));
hdfs.modifyAclEntries(filePath, aclSpec);
hdfs.createSnapshot(path, snapshotName);
AclStatus s = hdfs.getAclStatus(filePath);
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
assertPermission((short)010660, filePath);
s = hdfs.getAclStatus(fileSnapshotPath);
returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] {
aclEntry(ACCESS, USER, "bruce", READ_WRITE),
aclEntry(ACCESS, GROUP, NONE) }, returned);
    assertPermission((short)010660, fileSnapshotPath);
    hdfs.removeAcl(filePath);
}
@Test
public void testGetAclStatusDotSnapshotPath() throws Exception {
hdfs.mkdirs(path);
SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
AclStatus s = hdfs.getAclStatus(new Path(path, ".snapshot"));
AclEntry[] returned = s.getEntries().toArray(new AclEntry[0]);
assertArrayEquals(new AclEntry[] { }, returned);
}
@Test
public void testDeDuplication() throws Exception {
int startSize = AclStorage.getUniqueAclFeatures().getUniqueElementsSize();
    // unique AclEntries for this test
List<AclEntry> aclSpec = Lists.newArrayList(
aclEntry(ACCESS, USER, "testdeduplicateuser", ALL),
aclEntry(ACCESS, GROUP, "testdeduplicategroup", ALL));
hdfs.mkdirs(path);
hdfs.modifyAclEntries(path, aclSpec);
assertEquals("One more ACL feature should be unique", startSize + 1,
AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
Path subdir = new Path(path, "sub-dir");
hdfs.mkdirs(subdir);
Path file = new Path(path, "file");
hdfs.create(file).close();
AclFeature aclFeature;
{
      // Creating a snapshot of a root directory that has ACLs should reuse
      // the same AclFeature, incrementing its reference count.
aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
assertEquals("Reference count should be one before snapshot", 1,
aclFeature.getRefCount());
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(snapshotPath,
cluster);
assertSame(aclFeature, snapshotAclFeature);
assertEquals("Reference count should be increased", 2,
snapshotAclFeature.getRefCount());
}
{
      // Deleting the snapshot must leave the live directory's reference to
      // the same AclFeature intact.
deleteSnapshotWithAclAndVerify(aclFeature, path, startSize);
}
{
hdfs.modifyAclEntries(subdir, aclSpec);
aclFeature = FSAclBaseTest.getAclFeature(subdir, cluster);
assertEquals("Reference count should be 1", 1, aclFeature.getRefCount());
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
Path subdirInSnapshot = new Path(snapshotPath, "sub-dir");
AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(subdirInSnapshot,
cluster);
assertSame(aclFeature, snapshotAcl);
assertEquals("Reference count should remain same", 1,
aclFeature.getRefCount());
      // Deleting the snapshot whose sub-directory carries the ACLs should not
      // alter the reference count of the AclFeature.
deleteSnapshotWithAclAndVerify(aclFeature, subdir, startSize);
}
{
hdfs.modifyAclEntries(file, aclSpec);
aclFeature = FSAclBaseTest.getAclFeature(file, cluster);
assertEquals("Reference count should be 1", 1, aclFeature.getRefCount());
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
Path fileInSnapshot = new Path(snapshotPath, file.getName());
AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(fileInSnapshot,
cluster);
assertSame(aclFeature, snapshotAcl);
assertEquals("Reference count should remain same", 1,
aclFeature.getRefCount());
      // Deleting the snapshot whose contained file carries the ACLs should
      // not alter the reference count of the AclFeature.
deleteSnapshotWithAclAndVerify(aclFeature, file, startSize);
}
{
      // Modifying the ACLs of the snapshot root directory should create a new
      // AclFeature, while the old AclFeature remains referenced by the
      // snapshot.
hdfs.modifyAclEntries(path, aclSpec);
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
AclFeature snapshotAcl = FSAclBaseTest.getAclFeature(snapshotPath,
cluster);
aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
assertEquals("Before modification same ACL should be referenced twice", 2,
aclFeature.getRefCount());
List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER,
"testNewUser", ALL));
hdfs.modifyAclEntries(path, newAcl);
aclFeature = FSAclBaseTest.getAclFeature(path, cluster);
AclFeature snapshotAclPostModification = FSAclBaseTest.getAclFeature(
snapshotPath, cluster);
assertSame(snapshotAcl, snapshotAclPostModification);
assertNotSame(aclFeature, snapshotAclPostModification);
assertEquals("Old ACL feature reference count should be same", 1,
snapshotAcl.getRefCount());
assertEquals("New ACL feature reference should be used", 1,
aclFeature.getRefCount());
deleteSnapshotWithAclAndVerify(aclFeature, path, startSize);
}
{
      // Modifying the ACLs of a subdirectory under the snapshot root should
      // create a new AclFeature, while the old one remains referenced by the
      // snapshot.
hdfs.modifyAclEntries(subdir, aclSpec);
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
Path subdirInSnapshot = new Path(snapshotPath, "sub-dir");
AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(
subdirInSnapshot, cluster);
List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER,
"testNewUser", ALL));
hdfs.modifyAclEntries(subdir, newAcl);
aclFeature = FSAclBaseTest.getAclFeature(subdir, cluster);
assertNotSame(aclFeature, snapshotAclFeature);
assertEquals("Reference count should remain same", 1,
snapshotAclFeature.getRefCount());
assertEquals("New AclFeature should be used", 1, aclFeature.getRefCount());
deleteSnapshotWithAclAndVerify(aclFeature, subdir, startSize);
}
{
      // Modifying the ACLs of a file inside the snapshot root should create a
      // new AclFeature, while the old one remains referenced by the snapshot.
hdfs.modifyAclEntries(file, aclSpec);
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
Path fileInSnapshot = new Path(snapshotPath, file.getName());
AclFeature snapshotAclFeature = FSAclBaseTest.getAclFeature(
fileInSnapshot, cluster);
List<AclEntry> newAcl = Lists.newArrayList(aclEntry(ACCESS, USER,
"testNewUser", ALL));
hdfs.modifyAclEntries(file, newAcl);
aclFeature = FSAclBaseTest.getAclFeature(file, cluster);
assertNotSame(aclFeature, snapshotAclFeature);
assertEquals("Reference count should remain same", 1,
snapshotAclFeature.getRefCount());
deleteSnapshotWithAclAndVerify(aclFeature, file, startSize);
}
{
      // Deleting the original directory tree (dirs and files carrying ACLs)
      // while a snapshot still references it.
hdfs.delete(path, true);
Path dir = new Path(subdir, "dir");
hdfs.mkdirs(dir);
hdfs.modifyAclEntries(dir, aclSpec);
file = new Path(subdir, "file");
hdfs.create(file).close();
aclSpec.add(aclEntry(ACCESS, USER, "testNewUser", ALL));
hdfs.modifyAclEntries(file, aclSpec);
AclFeature fileAcl = FSAclBaseTest.getAclFeature(file, cluster);
AclFeature dirAcl = FSAclBaseTest.getAclFeature(dir, cluster);
Path snapshotPath = SnapshotTestHelper.createSnapshot(hdfs, path,
snapshotName);
Path dirInSnapshot = new Path(snapshotPath, "sub-dir/dir");
AclFeature snapshotDirAclFeature = FSAclBaseTest.getAclFeature(
dirInSnapshot, cluster);
Path fileInSnapshot = new Path(snapshotPath, "sub-dir/file");
AclFeature snapshotFileAclFeature = FSAclBaseTest.getAclFeature(
fileInSnapshot, cluster);
assertSame(fileAcl, snapshotFileAclFeature);
assertSame(dirAcl, snapshotDirAclFeature);
hdfs.delete(subdir, true);
assertEquals(
"Original ACLs references should be maintained for snapshot", 1,
snapshotFileAclFeature.getRefCount());
assertEquals(
"Original ACLs references should be maintained for snapshot", 1,
snapshotDirAclFeature.getRefCount());
hdfs.deleteSnapshot(path, snapshotName);
assertEquals("ACLs should be deleted from snapshot", startSize, AclStorage
.getUniqueAclFeatures().getUniqueElementsSize());
}
}
private void deleteSnapshotWithAclAndVerify(AclFeature aclFeature,
Path pathToCheckAcl, int totalAclFeatures) throws IOException {
hdfs.deleteSnapshot(path, snapshotName);
AclFeature afterDeleteAclFeature = FSAclBaseTest.getAclFeature(
pathToCheckAcl, cluster);
assertSame(aclFeature, afterDeleteAclFeature);
assertEquals("Reference count should remain same"
+ " even after deletion of snapshot", 1,
afterDeleteAclFeature.getRefCount());
hdfs.removeAcl(pathToCheckAcl);
assertEquals("Reference count should be 0", 0, aclFeature.getRefCount());
assertEquals("Unique ACL features should remain same", totalAclFeatures,
AclStorage.getUniqueAclFeatures().getUniqueElementsSize());
}
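  // Reference-count lifecycle exercised by the helper above, in summary:
  // deleting the snapshot leaves the live inode's AclFeature reference in
  // place (the count checked here stays at 1), removeAcl then drops it to 0,
  // and the feature disappears from AclStorage's unique-feature set,
  // restoring the pre-test size.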
/**
* Asserts that permission is denied to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionDenied(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
fail("expected AccessControlException for user " + user + ", path = " +
pathToCheck);
} catch (AccessControlException e) {
// expected
}
try {
fs.access(pathToCheck, FsAction.READ);
fail("The access call should have failed for "+pathToCheck);
} catch (AccessControlException e) {
// expected
}
}
/**
* Asserts that permission is granted to the given fs/user for the given
* directory.
*
* @param fs FileSystem to check
* @param user UserGroupInformation owner of fs
* @param pathToCheck Path directory to check
* @throws Exception if there is an unexpected error
*/
private static void assertDirPermissionGranted(FileSystem fs,
UserGroupInformation user, Path pathToCheck) throws Exception {
try {
fs.listStatus(pathToCheck);
fs.access(pathToCheck, FsAction.READ);
} catch (AccessControlException e) {
fail("expected permission granted for user " + user + ", path = " +
pathToCheck);
}
}
/**
* Asserts the value of the FsPermission bits on the inode of the test path.
*
* @param perm short expected permission bits
* @param pathToCheck Path to check
* @throws Exception thrown if there is an unexpected error
*/
private static void assertPermission(short perm, Path pathToCheck)
throws Exception {
AclTestHelpers.assertPermission(hdfs, pathToCheck, perm);
}
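  // Note on expected values such as (short)010750 used throughout: the
  // leading 01 (octal 010000, i.e. 1 << 12) is assumed here to be the
  // "has ACL" extension bit that HDFS folds into the permission short, so
  // 010750 reads as the ACL bit plus conventional mode bits 0750.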
/**
* Initialize the cluster, wait for it to become active, and get FileSystem
* instances for our test users.
*
* @param format if true, format the NameNode and DataNodes before starting up
* @throws Exception if any step fails
*/
private static void initCluster(boolean format) throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).format(format)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
fsAsBruce = DFSTestUtil.getFileSystemAs(BRUCE, conf);
fsAsDiana = DFSTestUtil.getFileSystemAs(DIANA, conf);
}
/**
* Restart the cluster, optionally saving a new checkpoint.
*
* @param checkpoint boolean true to save a new checkpoint
* @throws Exception if restart fails
*/
private static void restart(boolean checkpoint) throws Exception {
NameNode nameNode = cluster.getNameNode();
if (checkpoint) {
NameNodeAdapter.enterSafeMode(nameNode, false);
NameNodeAdapter.saveNamespace(nameNode);
}
shutdown();
initCluster(false);
}
}
| 38,562
| 40.24385
| 81
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.ByteArrayOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.PrintStream;
import java.security.PrivilegedAction;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
import org.apache.hadoop.hdfs.server.namenode.QuotaCounts;
import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiffList;
import org.apache.hadoop.hdfs.util.ReadOnlyList;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
/**
* Tests snapshot deletion.
*/
public class TestSnapshotDeletion {
protected static final long seed = 0;
protected static final short REPLICATION = 3;
protected static final short REPLICATION_1 = 2;
protected static final long BLOCKSIZE = 1024;
private final Path dir = new Path("/TestSnapshot");
private final Path sub = new Path(dir, "sub1");
private final Path subsub = new Path(sub, "subsub1");
protected Configuration conf;
protected MiniDFSCluster cluster;
protected FSNamesystem fsn;
protected FSDirectory fsdir;
protected BlockManager blockmanager;
protected DistributedFileSystem hdfs;
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.format(true).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
blockmanager = fsn.getBlockManager();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
   * Deleting a snapshottable directory with snapshots must fail.
*/
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot() throws Exception {
Path file0 = new Path(sub, "file0");
Path file1 = new Path(sub, "file1");
DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
// Allow snapshot for sub1, and create snapshot for it
hdfs.allowSnapshot(sub);
hdfs.createSnapshot(sub, "s1");
// Deleting a snapshottable dir with snapshots should fail
exception.expect(RemoteException.class);
String error = "The directory " + sub.toString()
+ " cannot be deleted since " + sub.toString()
+ " is snapshottable and already has snapshots";
exception.expectMessage(error);
hdfs.delete(sub, true);
}
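  // Illustrative sketch (added for this write-up, not part of the original
  // suite): since the delete above is expected to fail, the supported removal
  // sequence is to drop the snapshots first. Assuming the directory holds a
  // single snapshot named snapshotName:
  private void removeSnapshottableDir(Path snapshottableDir, String snapshotName)
      throws IOException {
    hdfs.deleteSnapshot(snapshottableDir, snapshotName); // drop the snapshot
    hdfs.disallowSnapshot(snapshottableDir);             // no longer snapshottable
    assertTrue(hdfs.delete(snapshottableDir, true));     // deletion now succeeds
  }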
/**
   * Test applying an edit log op that deletes a snapshottable directory
   * without snapshots. The snapshottable dir list in the snapshot manager
   * should be updated accordingly.
*/
@Test (timeout=300000)
public void testApplyEditLogForDeletion() throws Exception {
final Path foo = new Path("/foo");
final Path bar1 = new Path(foo, "bar1");
final Path bar2 = new Path(foo, "bar2");
hdfs.mkdirs(bar1);
hdfs.mkdirs(bar2);
// allow snapshots on bar1 and bar2
hdfs.allowSnapshot(bar1);
hdfs.allowSnapshot(bar2);
assertEquals(2, cluster.getNamesystem().getSnapshotManager()
.getNumSnapshottableDirs());
assertEquals(2, cluster.getNamesystem().getSnapshotManager()
.getSnapshottableDirs().length);
// delete /foo
hdfs.delete(foo, true);
cluster.restartNameNode(0);
// the snapshottable dir list in snapshot manager should be empty
assertEquals(0, cluster.getNamesystem().getSnapshotManager()
.getNumSnapshottableDirs());
assertEquals(0, cluster.getNamesystem().getSnapshotManager()
.getSnapshottableDirs().length);
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
cluster.restartNameNode(0);
}
/**
   * Deleting a directory whose snapshottable descendant has snapshots must fail.
*/
@Test (timeout=300000)
public void testDeleteDirectoryWithSnapshot2() throws Exception {
Path file0 = new Path(sub, "file0");
Path file1 = new Path(sub, "file1");
DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
Path subfile1 = new Path(subsub, "file0");
Path subfile2 = new Path(subsub, "file1");
DFSTestUtil.createFile(hdfs, subfile1, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);
// Allow snapshot for subsub1, and create snapshot for it
hdfs.allowSnapshot(subsub);
hdfs.createSnapshot(subsub, "s1");
    // Deleting dir while its descendant subsub1 has snapshots should fail
exception.expect(RemoteException.class);
String error = subsub.toString()
+ " is snapshottable and already has snapshots";
exception.expectMessage(error);
hdfs.delete(dir, true);
}
private static INodeDirectory getDir(final FSDirectory fsdir, final Path dir)
throws IOException {
final String dirStr = dir.toString();
return INodeDirectory.valueOf(fsdir.getINode(dirStr), dirStr);
}
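  // Added note: in checkQuotaUsageComputation below, expectedNs counts inodes
  // (files plus directories) charged against the namespace quota, while
  // expectedDs counts bytes charged against the storage-space quota, i.e.
  // file length multiplied by the replication factor.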
private void checkQuotaUsageComputation(final Path dirPath,
final long expectedNs, final long expectedDs) throws IOException {
INodeDirectory dirNode = getDir(fsdir, dirPath);
assertTrue(dirNode.isQuotaSet());
QuotaCounts q = dirNode.getDirectoryWithQuotaFeature().getSpaceConsumed();
assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
q.getNameSpace());
assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
q.getStorageSpace());
QuotaCounts counts = dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite(), false);
assertEquals(dirNode.dumpTreeRecursively().toString(), expectedNs,
counts.getNameSpace());
assertEquals(dirNode.dumpTreeRecursively().toString(), expectedDs,
counts.getStorageSpace());
}
/**
* Test deleting a directory which is a descendant of a snapshottable
* directory. In the test we need to cover the following cases:
*
* <pre>
* 1. Delete current INodeFile/INodeDirectory without taking any snapshot.
* 2. Delete current INodeFile/INodeDirectory while snapshots have been taken
* on ancestor(s).
* 3. Delete current INodeFileWithSnapshot.
* 4. Delete current INodeDirectoryWithSnapshot.
* </pre>
*/
@Test (timeout=300000)
public void testDeleteCurrentFileDirectory() throws Exception {
// create a folder which will be deleted before taking snapshots
Path deleteDir = new Path(subsub, "deleteDir");
Path deleteFile = new Path(deleteDir, "deleteFile");
// create a directory that we will not change during the whole process.
Path noChangeDirParent = new Path(sub, "noChangeDirParent");
Path noChangeDir = new Path(noChangeDirParent, "noChangeDir");
// create a file that we will not change in the future
Path noChangeFile = new Path(noChangeDir, "noChangeFile");
DFSTestUtil.createFile(hdfs, deleteFile, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
// we will change this file's metadata in the future
Path metaChangeFile1 = new Path(subsub, "metaChangeFile1");
DFSTestUtil.createFile(hdfs, metaChangeFile1, BLOCKSIZE, REPLICATION, seed);
// another file, created under noChangeDir, whose metadata will be changed
Path metaChangeFile2 = new Path(noChangeDir, "metaChangeFile2");
DFSTestUtil.createFile(hdfs, metaChangeFile2, BLOCKSIZE, REPLICATION, seed);
// Case 1: delete deleteDir before taking snapshots
hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
checkQuotaUsageComputation(dir, 10, BLOCKSIZE * REPLICATION * 4);
hdfs.delete(deleteDir, true);
checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
// create snapshot s0
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    // after creating snapshot s0, create a directory tempdir under dir and
    // then delete tempdir immediately
Path tempDir = new Path(dir, "tempdir");
Path tempFile = new Path(tempDir, "tempfile");
DFSTestUtil.createFile(hdfs, tempFile, BLOCKSIZE, REPLICATION, seed);
final INodeFile temp = TestSnapshotBlocksMap.assertBlockCollection(
tempFile.toString(), 1, fsdir, blockmanager);
BlockInfo[] blocks = temp.getBlocks();
hdfs.delete(tempDir, true);
// check dir's quota usage
checkQuotaUsageComputation(dir, 8, BLOCKSIZE * REPLICATION * 3);
// check blocks of tempFile
for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// make a change: create a new file under subsub
Path newFileAfterS0 = new Path(subsub, "newFile");
DFSTestUtil.createFile(hdfs, newFileAfterS0, BLOCKSIZE, REPLICATION, seed);
    // further change: change the replication factor of metaChangeFile1
    // and metaChangeFile2
hdfs.setReplication(metaChangeFile1, REPLICATION_1);
hdfs.setReplication(metaChangeFile2, REPLICATION_1);
// create snapshot s1
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
// check dir's quota usage
checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
// get two snapshots for later use
Snapshot snapshot0 = fsdir.getINode(dir.toString()).asDirectory()
.getSnapshot(DFSUtil.string2Bytes("s0"));
Snapshot snapshot1 = fsdir.getINode(dir.toString()).asDirectory()
.getSnapshot(DFSUtil.string2Bytes("s1"));
// Case 2 + Case 3: delete noChangeDirParent, noChangeFile, and
// metaChangeFile2. Note that when we directly delete a directory, the
// directory will be converted to an INodeDirectoryWithSnapshot. To make
// sure the deletion goes through an INodeDirectory, we delete the parent
// of noChangeDir
hdfs.delete(noChangeDirParent, true);
    // during the deletion, we add a diff for metaChangeFile2 as its snapshot
    // copy for s1; we also add diffs for both sub and noChangeDirParent
checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
// check the snapshot copy of noChangeDir
Path snapshotNoChangeDir = SnapshotTestHelper.getSnapshotPath(dir, "s1",
sub.getName() + "/" + noChangeDirParent.getName() + "/"
+ noChangeDir.getName());
INodeDirectory snapshotNode =
(INodeDirectory) fsdir.getINode(snapshotNoChangeDir.toString());
// should still be an INodeDirectory
assertEquals(INodeDirectory.class, snapshotNode.getClass());
ReadOnlyList<INode> children = snapshotNode
.getChildrenList(Snapshot.CURRENT_STATE_ID);
// check 2 children: noChangeFile and metaChangeFile2
assertEquals(2, children.size());
INode noChangeFileSCopy = children.get(1);
assertEquals(noChangeFile.getName(), noChangeFileSCopy.getLocalName());
assertEquals(INodeFile.class, noChangeFileSCopy.getClass());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
noChangeFileSCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
INodeFile metaChangeFile2SCopy = children.get(0).asFile();
assertEquals(metaChangeFile2.getName(), metaChangeFile2SCopy.getLocalName());
assertTrue(metaChangeFile2SCopy.isWithSnapshot());
assertFalse(metaChangeFile2SCopy.isUnderConstruction());
TestSnapshotBlocksMap.assertBlockCollection(new Path(snapshotNoChangeDir,
metaChangeFile2SCopy.getLocalName()).toString(), 1, fsdir, blockmanager);
// check the replication factor of metaChangeFile2SCopy
assertEquals(REPLICATION_1,
metaChangeFile2SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,
metaChangeFile2SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,
metaChangeFile2SCopy.getFileReplication(snapshot0.getId()));
// Case 4: delete directory sub
// before deleting sub, we first create a new file under sub
Path newFile = new Path(sub, "newFile");
DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
final INodeFile newFileNode = TestSnapshotBlocksMap.assertBlockCollection(
newFile.toString(), 1, fsdir, blockmanager);
blocks = newFileNode.getBlocks();
checkQuotaUsageComputation(dir, 10L, BLOCKSIZE * REPLICATION * 5);
hdfs.delete(sub, true);
    // during the deletion, we add diffs for subsub and metaChangeFile1, and
    // remove newFile
checkQuotaUsageComputation(dir, 9L, BLOCKSIZE * REPLICATION * 4);
for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// make sure the whole subtree of sub is stored correctly in snapshot
Path snapshotSub = SnapshotTestHelper.getSnapshotPath(dir, "s1",
sub.getName());
INodeDirectory snapshotNode4Sub = fsdir.getINode(snapshotSub.toString())
.asDirectory();
assertTrue(snapshotNode4Sub.isWithSnapshot());
// the snapshot copy of sub has only one child subsub.
// newFile should have been destroyed
assertEquals(1, snapshotNode4Sub.getChildrenList(Snapshot.CURRENT_STATE_ID)
.size());
    // but should have two children, subsub and noChangeDirParent, when s1 was taken
assertEquals(2, snapshotNode4Sub.getChildrenList(snapshot1.getId()).size());
// check the snapshot copy of subsub, which is contained in the subtree of
// sub's snapshot copy
INode snapshotNode4Subsub = snapshotNode4Sub.getChildrenList(
Snapshot.CURRENT_STATE_ID).get(0);
assertTrue(snapshotNode4Subsub.asDirectory().isWithSnapshot());
assertTrue(snapshotNode4Sub == snapshotNode4Subsub.getParent());
// check the children of subsub
INodeDirectory snapshotSubsubDir = (INodeDirectory) snapshotNode4Subsub;
children = snapshotSubsubDir.getChildrenList(Snapshot.CURRENT_STATE_ID);
assertEquals(2, children.size());
assertEquals(children.get(0).getLocalName(), metaChangeFile1.getName());
assertEquals(children.get(1).getLocalName(), newFileAfterS0.getName());
// only one child before snapshot s0
children = snapshotSubsubDir.getChildrenList(snapshot0.getId());
assertEquals(1, children.size());
INode child = children.get(0);
assertEquals(child.getLocalName(), metaChangeFile1.getName());
// check snapshot copy of metaChangeFile1
INodeFile metaChangeFile1SCopy = child.asFile();
assertTrue(metaChangeFile1SCopy.isWithSnapshot());
assertFalse(metaChangeFile1SCopy.isUnderConstruction());
assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(Snapshot.CURRENT_STATE_ID));
assertEquals(REPLICATION_1,
metaChangeFile1SCopy.getFileReplication(snapshot1.getId()));
assertEquals(REPLICATION,
metaChangeFile1SCopy.getFileReplication(snapshot0.getId()));
}
/**
* Test deleting the earliest (first) snapshot. In this simplest scenario, the
* snapshots are taken on the same directory, and we do not need to combine
* snapshot diffs.
*/
@Test (timeout=300000)
public void testDeleteEarliestSnapshot1() throws Exception {
// create files under sub
Path file0 = new Path(sub, "file0");
Path file1 = new Path(sub, "file1");
DFSTestUtil.createFile(hdfs, file0, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
String snapshotName = "s1";
try {
hdfs.deleteSnapshot(sub, snapshotName);
fail("SnapshotException expected: " + sub.toString()
+ " is not snapshottable yet");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + sub, e);
}
// make sub snapshottable
hdfs.allowSnapshot(sub);
try {
hdfs.deleteSnapshot(sub, snapshotName);
fail("SnapshotException expected: snapshot " + snapshotName
+ " does not exist for " + sub.toString());
} catch (Exception e) {
GenericTestUtils.assertExceptionContains("Cannot delete snapshot "
+ snapshotName + " from path " + sub.toString()
+ ": the snapshot does not exist.", e);
}
// create snapshot s1 for sub
SnapshotTestHelper.createSnapshot(hdfs, sub, snapshotName);
// check quota usage computation
checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
// delete s1
hdfs.deleteSnapshot(sub, snapshotName);
checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
// now we can create a snapshot with the same name
hdfs.createSnapshot(sub, snapshotName);
checkQuotaUsageComputation(sub, 3, BLOCKSIZE * REPLICATION * 2);
// create a new file under sub
Path newFile = new Path(sub, "newFile");
DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
// create another snapshot s2
String snapshotName2 = "s2";
hdfs.createSnapshot(sub, snapshotName2);
checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 3);
// Get the filestatus of sub under snapshot s2
Path ss = SnapshotTestHelper
.getSnapshotPath(sub, snapshotName2, "newFile");
FileStatus statusBeforeDeletion = hdfs.getFileStatus(ss);
// delete s1
hdfs.deleteSnapshot(sub, snapshotName);
checkQuotaUsageComputation(sub, 4, BLOCKSIZE * REPLICATION * 3);
FileStatus statusAfterDeletion = hdfs.getFileStatus(ss);
System.out.println("Before deletion: " + statusBeforeDeletion.toString()
+ "\n" + "After deletion: " + statusAfterDeletion.toString());
assertEquals(statusBeforeDeletion.toString(),
statusAfterDeletion.toString());
}
/**
* Test deleting the earliest (first) snapshot. In this more complicated
* scenario, the snapshots are taken across directories.
* <pre>
* The test covers the following scenarios:
* 1. delete the first diff in the diff list of a directory
* 2. delete the first diff in the diff list of a file
* </pre>
* Also, the recursive cleanTree process should cover both INodeFile and
* INodeDirectory.
*/
@Test (timeout=300000)
public void testDeleteEarliestSnapshot2() throws Exception {
Path noChangeDir = new Path(sub, "noChangeDir");
Path noChangeFile = new Path(noChangeDir, "noChangeFile");
Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
final INodeFile toDeleteFileNode = TestSnapshotBlocksMap
.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
BlockInfo[] blocks = toDeleteFileNode.getBlocks();
// create snapshot s0 on dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
hdfs.delete(toDeleteFile, true);
// the deletion adds diff of toDeleteFile and metaChangeDir
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
// /TestSnapshot/sub/noChangeDir/metaChangeFile
hdfs.setReplication(metaChangeFile, REPLICATION_1);
hdfs.setOwner(metaChangeDir, "unknown", "unknown");
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// create snapshot s1 on dir
hdfs.createSnapshot(dir, "s1");
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// delete snapshot s0
hdfs.deleteSnapshot(dir, "s0");
// namespace: remove toDeleteFile and its diff, metaChangeFile's diff,
// metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
// metaChangeFile's replication factor decreases
checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
for (BlockInfo b : blocks) {
assertNull(blockmanager.getBlockCollection(b));
}
// check 1. there is no snapshot s0
final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
DirectoryDiffList diffList = dirNode.getDiffs();
assertEquals(1, diffList.asList().size());
assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory()
.getDiffs();
assertEquals(0, diffList.asList().size());
// check 2. noChangeDir and noChangeFile are still there
final INodeDirectory noChangeDirNode =
(INodeDirectory) fsdir.getINode(noChangeDir.toString());
assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
final INodeFile noChangeFileNode =
(INodeFile) fsdir.getINode(noChangeFile.toString());
assertEquals(INodeFile.class, noChangeFileNode.getClass());
TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1,
fsdir, blockmanager);
// check 3: current metadata of metaChangeFile and metaChangeDir
FileStatus status = hdfs.getFileStatus(metaChangeDir);
assertEquals("unknown", status.getOwner());
assertEquals("unknown", status.getGroup());
status = hdfs.getFileStatus(metaChangeFile);
assertEquals(REPLICATION_1, status.getReplication());
TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1,
fsdir, blockmanager);
// check 4: no snapshot copy for toDeleteFile
try {
hdfs.getFileStatus(toDeleteFile);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: "
+ toDeleteFile.toString(), e);
}
final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir,
"s0", toDeleteFile.toString().substring(dir.toString().length()));
try {
hdfs.getFileStatus(toDeleteFileInSnapshot);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: "
+ toDeleteFileInSnapshot.toString(), e);
}
}
/**
   * Delete a snapshot that is taken before a directory deletion; the
   * directory diff list should be combined correctly.
*/
@Test (timeout=60000)
public void testDeleteSnapshot1() throws Exception {
final Path root = new Path("/");
Path dir = new Path("/dir1");
Path file1 = new Path(dir, "file1");
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root, "s1");
Path file2 = new Path(dir, "file2");
DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
hdfs.createSnapshot(root, "s2");
// delete file
hdfs.delete(file1, true);
hdfs.delete(file2, true);
// delete directory
assertTrue(hdfs.delete(dir, false));
// delete second snapshot
hdfs.deleteSnapshot(root, "s2");
NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
NameNodeAdapter.saveNamespace(cluster.getNameNode());
// restart NN
cluster.restartNameNodes();
}
/**
   * Delete a snapshot that is taken before a recursive directory deletion;
   * the directory diff list should be combined correctly.
*/
@Test (timeout=60000)
public void testDeleteSnapshot2() throws Exception {
final Path root = new Path("/");
Path dir = new Path("/dir1");
Path file1 = new Path(dir, "file1");
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
hdfs.allowSnapshot(root);
hdfs.createSnapshot(root, "s1");
Path file2 = new Path(dir, "file2");
DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
INodeFile file2Node = fsdir.getINode(file2.toString()).asFile();
long file2NodeId = file2Node.getId();
hdfs.createSnapshot(root, "s2");
// delete directory recursively
assertTrue(hdfs.delete(dir, true));
assertNotNull(fsdir.getInode(file2NodeId));
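    // file2 is still referenced by snapshot s2, so its inode must survive the
    // recursive delete; dropping s2 below releases the last reference to it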
// delete second snapshot
hdfs.deleteSnapshot(root, "s2");
    assertNull(fsdir.getInode(file2NodeId));
NameNodeAdapter.enterSafeMode(cluster.getNameNode(), false);
NameNodeAdapter.saveNamespace(cluster.getNameNode());
// restart NN
cluster.restartNameNodes();
}
/**
* Test deleting snapshots in a more complicated scenario: need to combine
* snapshot diffs, but no need to handle diffs distributed in a dir tree
*/
@Test (timeout=300000)
public void testCombineSnapshotDiff1() throws Exception {
testCombineSnapshotDiffImpl(sub, "", 1);
}
/**
* Test deleting snapshots in more complicated scenarios (snapshot diffs are
* distributed in the directory sub-tree)
*/
@Test (timeout=300000)
public void testCombineSnapshotDiff2() throws Exception {
testCombineSnapshotDiffImpl(sub, "subsub1/subsubsub1/", 3);
}
/**
   * When combining two snapshots, make sure files/directories created after
   * the prior snapshot get destroyed.
*/
@Test (timeout=300000)
public void testCombineSnapshotDiff3() throws Exception {
// create initial dir and subdir
Path dir = new Path("/dir");
Path subDir1 = new Path(dir, "subdir1");
Path subDir2 = new Path(dir, "subdir2");
hdfs.mkdirs(subDir2);
Path subsubDir = new Path(subDir1, "subsubdir");
hdfs.mkdirs(subsubDir);
// take snapshots on subdir and dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
// create new dir under initial dir
Path newDir = new Path(subsubDir, "newdir");
Path newFile = new Path(newDir, "newfile");
DFSTestUtil.createFile(hdfs, newFile, BLOCKSIZE, REPLICATION, seed);
Path newFile2 = new Path(subDir2, "newfile");
DFSTestUtil.createFile(hdfs, newFile2, BLOCKSIZE, REPLICATION, seed);
// create another snapshot
SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 2 * REPLICATION);
// delete subsubdir and subDir2
hdfs.delete(subsubDir, true);
hdfs.delete(subDir2, true);
// add diff of s2 to subDir1, subsubDir, and subDir2
checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 2 * REPLICATION);
// delete snapshot s2
hdfs.deleteSnapshot(dir, "s2");
// delete s2 diff in dir, subDir2, and subsubDir. Delete newFile, newDir,
// and newFile2. Rename s2 diff to s1 for subDir1
checkQuotaUsageComputation(dir, 4, 0);
// Check rename of snapshot diff in subDir1
Path subdir1_s1 = SnapshotTestHelper.getSnapshotPath(dir, "s1",
subDir1.getName());
Path subdir1_s2 = SnapshotTestHelper.getSnapshotPath(dir, "s2",
subDir1.getName());
assertTrue(hdfs.exists(subdir1_s1));
assertFalse(hdfs.exists(subdir1_s2));
}
/**
   * Test snapshot deletion.
   * @param snapshotRoot The dir where the snapshots are created
   * @param modDirStr The snapshotRoot itself or one of its sub-directories,
   *     where the modifications happen. It is represented as a relative
   *     path to the snapshotRoot.
   * @param dirNodeNum The number of directory inodes on the path from
   *     snapshotRoot down to modDir, inclusive of both ends.
*/
private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr,
int dirNodeNum) throws Exception {
Path modDir = modDirStr.isEmpty() ? snapshotRoot : new Path(snapshotRoot,
modDirStr);
Path file10 = new Path(modDir, "file10");
Path file11 = new Path(modDir, "file11");
Path file12 = new Path(modDir, "file12");
Path file13 = new Path(modDir, "file13");
Path file14 = new Path(modDir, "file14");
Path file15 = new Path(modDir, "file15");
DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
// create snapshot s1 for snapshotRoot
SnapshotTestHelper.createSnapshot(hdfs, snapshotRoot, "s1");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 8 * BLOCKSIZE);
// delete file11
hdfs.delete(file11, true);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 8 * BLOCKSIZE);
// modify file12
hdfs.setReplication(file12, REPLICATION);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 9 * BLOCKSIZE);
// modify file13
hdfs.setReplication(file13, REPLICATION);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 10 * BLOCKSIZE);
// create file14
DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 5, 13 * BLOCKSIZE);
// create file15
DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 16 * BLOCKSIZE);
// create snapshot s2 for snapshotRoot
hdfs.createSnapshot(snapshotRoot, "s2");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 16 * BLOCKSIZE);
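    // Notation in the comments below (derived from the created/deleted child
    // lists that a DirectoryDiff keeps): "(c, 0)" marks an entry in the
    // created list, "(0, d)" an entry in the deleted list; re-creating a
    // previously deleted child produces both.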
// create file11 again: (0, d) + (c, 0)
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// delete file12
hdfs.delete(file12, true);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// modify file13
hdfs.setReplication(file13, (short) (REPLICATION - 2));
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// delete file14: (c, 0) + (0, d)
hdfs.delete(file14, true);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// modify file15
hdfs.setReplication(file15, REPLICATION_1);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// create snapshot s3 for snapshotRoot
hdfs.createSnapshot(snapshotRoot, "s3");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// modify file10, to check if the posterior diff was set correctly
hdfs.setReplication(file10, REPLICATION);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 20 * BLOCKSIZE);
Path file10_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
modDirStr + "file10");
Path file11_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
modDirStr + "file11");
Path file12_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
modDirStr + "file12");
Path file13_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
modDirStr + "file13");
Path file14_s2 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s2",
modDirStr + "file14");
Path file15_s2 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s2",
modDirStr + "file15");
FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(
file14_s2.toString(), 1, fsdir, blockmanager);
BlockInfo[] blocks_14 = file14Node.getBlocks();
TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir,
blockmanager);
// delete s2, in which process we need to combine the diff in s2 to s1
hdfs.deleteSnapshot(snapshotRoot, "s2");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 14 * BLOCKSIZE);
// check the correctness of s1
FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
assertEquals(statusBeforeDeletion10.toString(),
statusAfterDeletion10.toString());
assertEquals(statusBeforeDeletion11.toString(),
statusAfterDeletion11.toString());
assertEquals(statusBeforeDeletion12.toString(),
statusAfterDeletion12.toString());
assertEquals(statusBeforeDeletion13.toString(),
statusAfterDeletion13.toString());
TestSnapshotBlocksMap.assertBlockCollection(file10_s1.toString(), 1, fsdir,
blockmanager);
TestSnapshotBlocksMap.assertBlockCollection(file11_s1.toString(), 1, fsdir,
blockmanager);
TestSnapshotBlocksMap.assertBlockCollection(file12_s1.toString(), 1, fsdir,
blockmanager);
TestSnapshotBlocksMap.assertBlockCollection(file13_s1.toString(), 1, fsdir,
blockmanager);
// make sure file14 and file15 are not included in s1
Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
modDirStr + "file14");
Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1",
modDirStr + "file15");
assertFalse(hdfs.exists(file14_s1));
assertFalse(hdfs.exists(file15_s1));
for (BlockInfo b : blocks_14) {
assertNull(blockmanager.getBlockCollection(b));
}
INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
assertEquals(REPLICATION_1, nodeFile13.getPreferredBlockReplication());
TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir,
blockmanager);
INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
assertEquals(REPLICATION_1, nodeFile12.getPreferredBlockReplication());
}
  /** Test deleting snapshots with modifications to the directory's metadata */
@Test (timeout=300000)
public void testDeleteSnapshotWithDirModification() throws Exception {
Path file = new Path(sub, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
hdfs.setOwner(sub, "user1", "group1");
// create snapshot s1 for sub1, and change the metadata of sub1
SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
hdfs.setOwner(sub, "user2", "group2");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
// create snapshot s2 for sub1, but do not modify sub1 afterwards
hdfs.createSnapshot(sub, "s2");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
// create snapshot s3 for sub1, and change the metadata of sub1
hdfs.createSnapshot(sub, "s3");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
hdfs.setOwner(sub, "user3", "group3");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
// delete snapshot s3
hdfs.deleteSnapshot(sub, "s3");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
// check sub1's metadata in snapshot s2
FileStatus statusOfS2 = hdfs.getFileStatus(new Path(sub,
HdfsConstants.DOT_SNAPSHOT_DIR + "/s2"));
assertEquals("user2", statusOfS2.getOwner());
assertEquals("group2", statusOfS2.getGroup());
// delete snapshot s2
hdfs.deleteSnapshot(sub, "s2");
checkQuotaUsageComputation(sub, 2, BLOCKSIZE * 3);
// check sub1's metadata in snapshot s1
FileStatus statusOfS1 = hdfs.getFileStatus(new Path(sub,
HdfsConstants.DOT_SNAPSHOT_DIR + "/s1"));
assertEquals("user1", statusOfS1.getOwner());
assertEquals("group1", statusOfS1.getGroup());
}
@Test
public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
cluster.shutdown();
Configuration newConf = new Configuration(conf);
newConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
cluster = new MiniDFSCluster.Builder(newConf).numDataNodes(0).build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
final Path path = new Path("/dir");
hdfs.mkdirs(path);
hdfs.allowSnapshot(path);
hdfs.mkdirs(new Path(path, "/test"));
hdfs.createSnapshot(path, "s1");
UserGroupInformation anotherUser = UserGroupInformation
.createRemoteUser("anotheruser");
anotherUser.doAs(new PrivilegedAction<Object>() {
@Override
public Object run() {
DistributedFileSystem anotherUserFS = null;
try {
anotherUserFS = cluster.getFileSystem();
anotherUserFS.deleteSnapshot(path, "s1");
} catch (IOException e) {
fail("Failed to delete snapshot : " + e.getLocalizedMessage());
} finally {
IOUtils.closeStream(anotherUserFS);
}
return null;
}
});
}
/**
* A test covering the case where the snapshot diff to be deleted is renamed
* to its previous snapshot.
*/
@Test (timeout=300000)
public void testRenameSnapshotDiff() throws Exception {
cluster.getNamesystem().getSnapshotManager().setAllowNestedSnapshots(true);
final Path subFile0 = new Path(sub, "file0");
final Path subsubFile0 = new Path(subsub, "file0");
DFSTestUtil.createFile(hdfs, subFile0, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, subsubFile0, BLOCKSIZE, REPLICATION, seed);
hdfs.setOwner(subsub, "owner", "group");
// create snapshot s0 on sub
SnapshotTestHelper.createSnapshot(hdfs, sub, "s0");
checkQuotaUsageComputation(sub, 4, BLOCKSIZE * 6);
// make some changes on both sub and subsub
final Path subFile1 = new Path(sub, "file1");
final Path subsubFile1 = new Path(subsub, "file1");
DFSTestUtil.createFile(hdfs, subFile1, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, subsubFile1, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
// create snapshot s1 on sub
SnapshotTestHelper.createSnapshot(hdfs, sub, "s1");
checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
// create snapshot s2 on dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s2");
checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
// make changes on subsub and subsubFile1
hdfs.setOwner(subsub, "unknown", "unknown");
hdfs.setReplication(subsubFile1, REPLICATION_1);
checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
// make changes on sub
hdfs.delete(subFile1, true);
checkQuotaUsageComputation(new Path("/"), 8, BLOCKSIZE * 11);
checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
Path subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
sub.getName() + Path.SEPARATOR + subsub.getName());
Path subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
sub.getName() + Path.SEPARATOR + subsub.getName() + Path.SEPARATOR
+ subsubFile1.getName());
Path subFile1SCopy = SnapshotTestHelper.getSnapshotPath(dir, "s2",
sub.getName() + Path.SEPARATOR + subFile1.getName());
FileStatus subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
assertEquals("owner", subsubStatus.getOwner());
assertEquals("group", subsubStatus.getGroup());
FileStatus subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
assertEquals(REPLICATION, subsubFile1Status.getReplication());
FileStatus subFile1Status = hdfs.getFileStatus(subFile1SCopy);
assertEquals(REPLICATION_1, subFile1Status.getReplication());
// delete snapshot s2
hdfs.deleteSnapshot(dir, "s2");
checkQuotaUsageComputation(new Path("/"), 8, BLOCKSIZE * 11);
checkQuotaUsageComputation(dir, 7, BLOCKSIZE * 11);
checkQuotaUsageComputation(sub, 6, BLOCKSIZE * 11);
// no snapshot copy for s2
try {
hdfs.getFileStatus(subsubSnapshotCopy);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: "
+ subsubSnapshotCopy.toString(), e);
}
try {
hdfs.getFileStatus(subsubFile1SCopy);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: "
+ subsubFile1SCopy.toString(), e);
}
try {
hdfs.getFileStatus(subFile1SCopy);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: "
+ subFile1SCopy.toString(), e);
}
// the snapshot copy of s2 should now be renamed to s1 under sub
subsubSnapshotCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1",
subsub.getName());
subsubFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1",
subsub.getName() + Path.SEPARATOR + subsubFile1.getName());
subFile1SCopy = SnapshotTestHelper.getSnapshotPath(sub, "s1",
subFile1.getName());
subsubStatus = hdfs.getFileStatus(subsubSnapshotCopy);
assertEquals("owner", subsubStatus.getOwner());
assertEquals("group", subsubStatus.getGroup());
subsubFile1Status = hdfs.getFileStatus(subsubFile1SCopy);
assertEquals(REPLICATION, subsubFile1Status.getReplication());
// also subFile1's snapshot copy should have been moved to diff of s1 as
// combination
subFile1Status = hdfs.getFileStatus(subFile1SCopy);
assertEquals(REPLICATION_1, subFile1Status.getReplication());
}
@Test
public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
ByteArrayOutputStream out = new ByteArrayOutputStream();
PrintStream psOut = new PrintStream(out);
System.setOut(psOut);
System.setErr(psOut);
FsShell shell = new FsShell();
shell.setConf(conf);
String[] argv1 = {"-deleteSnapshot", "/tmp"};
int val = shell.run(argv1);
    assertEquals(-1, val);
assertTrue(out.toString().contains(
argv1[0] + ": Incorrect number of arguments."));
out.reset();
String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
val = shell.run(argv2);
    assertEquals(-1, val);
assertTrue(out.toString().contains(
argv2[0] + ": Incorrect number of arguments."));
psOut.close();
out.close();
}
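  // Hedged sketch (added for illustration, not in the original suite): the
  // correct form takes exactly two arguments, <snapshotDir> <snapshotName>.
  // This assumes the directory is snapshottable and the snapshot exists.
  private int runDeleteSnapshot(String snapshotDir, String snapshotName)
      throws Exception {
    FsShell shell = new FsShell();
    shell.setConf(conf);
    String[] argv = {"-deleteSnapshot", snapshotDir, snapshotName};
    return shell.run(argv); // 0 on success, non-zero on error
  }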
/*
   * The OP_DELETE_SNAPSHOT edit log op was not decrementing the safemode
   * threshold on restart in HA mode. See HDFS-5504.
*/
@Test(timeout = 60000)
public void testHANNRestartAfterSnapshotDeletion() throws Exception {
hdfs.close();
cluster.shutdown();
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf)
.nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(1)
.build();
cluster.transitionToActive(0);
// stop the standby namenode
NameNode snn = cluster.getNameNode(1);
snn.stop();
hdfs = (DistributedFileSystem) HATestUtil
.configureFailoverFs(cluster, conf);
Path dir = new Path("/dir");
Path subDir = new Path(dir, "sub");
hdfs.mkdirs(dir);
hdfs.allowSnapshot(dir);
for (int i = 0; i < 5; i++) {
DFSTestUtil.createFile(hdfs, new Path(subDir, "" + i), 100, (short) 1,
1024L);
}
// take snapshot
hdfs.createSnapshot(dir, "s0");
// delete the subdir
hdfs.delete(subDir, true);
// roll the edit log
NameNode ann = cluster.getNameNode(0);
ann.getRpcServer().rollEditLog();
hdfs.deleteSnapshot(dir, "s0");
// wait for the blocks deletion at namenode
Thread.sleep(2000);
NameNodeAdapter.abortEditLogs(ann);
cluster.restartNameNode(0, false);
cluster.transitionToActive(0);
// wait till the cluster becomes active
cluster.waitClusterUp();
}
@Test
public void testCorrectNumberOfBlocksAfterRestart() throws IOException {
final Path foo = new Path("/foo");
final Path bar = new Path(foo, "bar");
final Path file = new Path(foo, "file");
final String snapshotName = "ss0";
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
hdfs.mkdirs(bar);
hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
hdfs.allowSnapshot(foo);
hdfs.createSnapshot(foo, snapshotName);
hdfs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
hdfs.saveNamespace();
hdfs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
hdfs.deleteSnapshot(foo, snapshotName);
hdfs.delete(bar, true);
hdfs.delete(foo, true);
long numberOfBlocks = cluster.getNamesystem().getBlocksTotal();
cluster.restartNameNode(0);
assertEquals(numberOfBlocks, cluster.getNamesystem().getBlocksTotal());
}
}
| 47,572
| 40.730702
| 102
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSetQuotaWithSnapshot.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
import org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff;
import org.apache.hadoop.hdfs.util.Diff.ListType;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.ExpectedException;
public class TestSetQuotaWithSnapshot {
protected static final long seed = 0;
protected static final short REPLICATION = 3;
protected static final long BLOCKSIZE = 1024;
protected Configuration conf;
protected MiniDFSCluster cluster;
protected FSNamesystem fsn;
protected FSDirectory fsdir;
protected DistributedFileSystem hdfs;
@Rule
public ExpectedException exception = ExpectedException.none();
@Before
public void setUp() throws Exception {
conf = new Configuration();
conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.format(true).build();
cluster.waitActive();
fsn = cluster.getNamesystem();
fsdir = fsn.getFSDirectory();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test (timeout=60000)
public void testSetQuota() throws Exception {
final Path dir = new Path("/TestSnapshot");
hdfs.mkdirs(dir);
// allow snapshot on dir and create snapshot s1
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
Path sub = new Path(dir, "sub");
hdfs.mkdirs(sub);
Path fileInSub = new Path(sub, "file");
DFSTestUtil.createFile(hdfs, fileInSub, BLOCKSIZE, REPLICATION, seed);
INodeDirectory subNode = INodeDirectory.valueOf(
fsdir.getINode(sub.toString()), sub);
    // subNode should be an INodeDirectory, but not an INodeDirectoryWithSnapshot
assertFalse(subNode.isWithSnapshot());
hdfs.setQuota(sub, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
subNode = INodeDirectory.valueOf(fsdir.getINode(sub.toString()), sub);
assertTrue(subNode.isQuotaSet());
assertFalse(subNode.isWithSnapshot());
}
/**
   * Test clearing the quota of a snapshottable dir or a dir with snapshots.
*/
@Test
public void testClearQuota() throws Exception {
final Path dir = new Path("/TestSnapshot");
hdfs.mkdirs(dir);
hdfs.allowSnapshot(dir);
hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET,
HdfsConstants.QUOTA_DONT_SET);
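    // Added note (grounded in the assertions below): QUOTA_DONT_SET leaves the
    // existing quota untouched, while QUOTA_RESET clears it; neither setQuota
    // call should add a new directory diff to the snapshottable dir.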
INodeDirectory dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0, dirNode.getDiffs().asList().size());
hdfs.setQuota(dir, HdfsConstants.QUOTA_DONT_SET - 1,
HdfsConstants.QUOTA_DONT_SET - 1);
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0, dirNode.getDiffs().asList().size());
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(0, dirNode.getDiffs().asList().size());
// allow snapshot on dir and create snapshot s1
SnapshotTestHelper.createSnapshot(hdfs, dir, "s1");
// clear quota of dir
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
// dir should still be a snapshottable directory
dirNode = fsdir.getINode4Write(dir.toString()).asDirectory();
assertTrue(dirNode.isSnapshottable());
assertEquals(1, dirNode.getDiffs().asList().size());
SnapshottableDirectoryStatus[] status = hdfs.getSnapshottableDirListing();
assertEquals(1, status.length);
assertEquals(dir, status[0].getFullPath());
final Path subDir = new Path(dir, "sub");
hdfs.mkdirs(subDir);
hdfs.createSnapshot(dir, "s2");
final Path file = new Path(subDir, "file");
DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
hdfs.setQuota(dir, HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
INode subNode = fsdir.getINode4Write(subDir.toString());
assertTrue(subNode.asDirectory().isWithSnapshot());
List<DirectoryDiff> diffList = subNode.asDirectory().getDiffs().asList();
assertEquals(1, diffList.size());
Snapshot s2 = dirNode.getSnapshot(DFSUtil.string2Bytes("s2"));
assertEquals(s2.getId(), diffList.get(0).getSnapshotId());
List<INode> createdList = diffList.get(0).getChildrenDiff().getList(ListType.CREATED);
assertEquals(1, createdList.size());
assertSame(fsdir.getINode4Write(file.toString()), createdList.get(0));
}
}
| 6,357
| 38.7375
| 98
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotNameWithInvalidCharacters.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.fail;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.ipc.RemoteException;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestSnapshotNameWithInvalidCharacters {
private static final long SEED = 0;
private static final short REPLICATION = 1;
private static final int BLOCKSIZE = 1024;
private static final Configuration conf = new Configuration();
private static MiniDFSCluster cluster;
private static DistributedFileSystem hdfs;
private final Path dir1 = new Path("/");
private final String file1Name = "file1";
private final String snapshot1 = "a:b:c";
private final String snapshot2 = "a/b/c";
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
hdfs = cluster.getFileSystem();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
cluster = null;
}
}
  @Test (timeout = 600000)
  public void testSnapshotWithInvalidName() throws Exception {
    Path file1 = new Path(dir1, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
    hdfs.allowSnapshot(dir1);
    try {
      hdfs.createSnapshot(dir1, snapshot1);
    } catch (RemoteException e) {
      // expected: "a:b:c" is not a legal snapshot name
    }
  }
  @Test(timeout = 60000)
  public void testSnapshotWithInvalidName1() throws Exception {
    Path file1 = new Path(dir1, file1Name);
    DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
    hdfs.allowSnapshot(dir1);
    try {
      hdfs.createSnapshot(dir1, snapshot2);
      fail("Expected a RemoteException: snapshot names may not contain \""
          + Path.SEPARATOR + "\"");
    } catch (RemoteException e) {
      // expected: "a/b/c" contains the path separator
    }
  }
}
| 2,774
| 30.534091
| 75
|
java
|
hadoop
|
hadoop-master/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
|
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.namenode.snapshot;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestSnapshotListing {
static final long seed = 0;
static final short REPLICATION = 3;
static final long BLOCKSIZE = 1024;
private final Path dir = new Path("/test.snapshot/dir");
Configuration conf;
MiniDFSCluster cluster;
FSNamesystem fsn;
DistributedFileSystem hdfs;
@Before
public void setUp() throws Exception {
conf = new Configuration();
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
.build();
cluster.waitActive();
fsn = cluster.getNamesystem();
hdfs = cluster.getFileSystem();
hdfs.mkdirs(dir);
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
/**
* Test listing snapshots under a snapshottable directory
*/
@Test (timeout=15000)
public void testListSnapshots() throws Exception {
final Path snapshotsPath = new Path(dir, ".snapshot");
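    // ".snapshot" is the reserved name (HdfsConstants.DOT_SNAPSHOT_DIR) under
    // which a snapshottable directory exposes its snapshots for listing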
FileStatus[] stats = null;
// special case: snapshots of root
stats = hdfs.listStatus(new Path("/.snapshot"));
// should be 0 since root's snapshot quota is 0
assertEquals(0, stats.length);
    // list before making dir snapshottable
try {
stats = hdfs.listStatus(snapshotsPath);
fail("expect SnapshotException");
} catch (IOException e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory: " + dir.toString(), e);
}
// list before creating snapshots
hdfs.allowSnapshot(dir);
stats = hdfs.listStatus(snapshotsPath);
assertEquals(0, stats.length);
// list while creating snapshots
final int snapshotNum = 5;
for (int sNum = 0; sNum < snapshotNum; sNum++) {
hdfs.createSnapshot(dir, "s_" + sNum);
stats = hdfs.listStatus(snapshotsPath);
assertEquals(sNum + 1, stats.length);
for (int i = 0; i <= sNum; i++) {
assertEquals("s_" + i, stats[i].getPath().getName());
}
}
// list while deleting snapshots
for (int sNum = snapshotNum - 1; sNum > 0; sNum--) {
hdfs.deleteSnapshot(dir, "s_" + sNum);
stats = hdfs.listStatus(snapshotsPath);
assertEquals(sNum, stats.length);
for (int i = 0; i < sNum; i++) {
assertEquals("s_" + i, stats[i].getPath().getName());
}
}
// remove the last snapshot
hdfs.deleteSnapshot(dir, "s_0");
stats = hdfs.listStatus(snapshotsPath);
assertEquals(0, stats.length);
}
}
| 3,862
| 30.92562
| 78
|
java
|