Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache: class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadFile.
@Test
public void testHasNonEcBlockUsingStripedIDForLoadFile() throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/test_block_manager";
    String testFile = "testfile_loadfile";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser_loadfile";
    String clientMachine = "testMachine_loadfile";
    long blkId = -1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    fs.mkdir(new Path(testDir), new FsPermission("755"));
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    BlockInfoContiguous cBlk = new BlockInfoContiguous(
        new Block(blkId, blkNumBytes, timestamp), (short) 3);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(cBlk);
    TestINodeFile.toCompleteFile(file);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
    // after the file whose non-EC block uses a striped block ID is deleted,
    // hasNonEcBlockUsingStripedID should be reset to false
    fs = cluster.getFileSystem();
    fs.delete(p, false);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertFalse(fns.getBlockManager().hasNonEcBlockUsingStripedID());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
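The test above persists the namespace and reloads it by entering safe mode, calling saveNamespace, and restarting the NameNode; that sequence is what forces the block with a striped ID to travel through the fsimage loading path. Below is a minimal standalone sketch of that save-and-reload pattern against a MiniDFSCluster; the class name and the /reload_me path are illustrative assumptions, not part of the Hadoop test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

public class FsImageReloadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/reload_me"));
      // Persist the namespace; saveNamespace requires the NameNode to be in safe mode.
      FSNamesystem fns = cluster.getNamesystem();
      fns.enterSafeMode(false);
      fns.saveNamespace(0, 0);
      // Restarting the NameNode forces it to rebuild the namespace from the fsimage just written.
      cluster.restartNameNodes();
      cluster.waitActive();
      System.out.println("Directory survived reload: "
          + cluster.getFileSystem().exists(new Path("/reload_me")));
    } finally {
      cluster.shutdown();
    }
  }
}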
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache: class TestFSImage, method testHasNonEcBlockUsingStripedIDForLoadSnapshot.
@Test
public void testHasNonEcBlockUsingStripedIDForLoadSnapshot() throws IOException {
  // start a cluster
  Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9).build();
    cluster.waitActive();
    DistributedFileSystem fs = cluster.getFileSystem();
    FSNamesystem fns = cluster.getNamesystem();
    String testDir = "/test_block_manager";
    String testFile = "testfile_loadSnapshot";
    String testFilePath = testDir + "/" + testFile;
    String clientName = "testUser_loadSnapshot";
    String clientMachine = "testMachine_loadSnapshot";
    long blkId = -1;
    long blkNumBytes = 1024;
    long timestamp = 1426222918;
    Path d = new Path(testDir);
    fs.mkdir(d, new FsPermission("755"));
    fs.allowSnapshot(d);
    Path p = new Path(testFilePath);
    DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
    BlockInfoContiguous cBlk = new BlockInfoContiguous(
        new Block(blkId, blkNumBytes, timestamp), (short) 3);
    INodeFile file = (INodeFile) fns.getFSDirectory().getINode(testFilePath);
    file.toUnderConstruction(clientName, clientMachine);
    file.addBlock(cBlk);
    TestINodeFile.toCompleteFile(file);
    fs.createSnapshot(d, "testHasNonEcBlockUsingStripeID");
    fs.truncate(p, 0);
    fns.enterSafeMode(false);
    fns.saveNamespace(0, 0);
    cluster.restartNameNodes();
    cluster.waitActive();
    fns = cluster.getNamesystem();
    assertTrue(fns.getBlockManager().hasNonEcBlockUsingStripedID());
    cluster.shutdown();
    cluster = null;
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
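This variant keeps the legacy block reachable only through a snapshot while the live file is truncated, so the striped-ID flag has to be reconstructed from the snapshot section of the fsimage after the restart. The following is a minimal sketch of the snapshot calls involved (allowSnapshot, createSnapshot, truncate); the class name, paths, and snapshot name are assumptions made for illustration.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class SnapshotTruncateSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/snap_dir");
      Path file = new Path(dir, "data");
      fs.mkdirs(dir);
      DFSTestUtil.createFile(fs, file, 1024, (short) 1, 0);
      fs.allowSnapshot(dir);            // mark the directory snapshottable
      fs.createSnapshot(dir, "s0");     // freeze the current contents
      fs.truncate(file, 0);             // shrink the live copy to zero length
      // The pre-truncate data is still reachable through the snapshot path.
      Path inSnapshot = new Path(dir, ".snapshot/s0/data");
      System.out.println("snapshot copy length = "
          + fs.getFileStatus(inSnapshot).getLen());
    } finally {
      cluster.shutdown();
    }
  }
}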
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache: class TestFSImageWithAcl, method testRootACLAfterLoadingFsImage.
@Test
public void testRootACLAfterLoadingFsImage() throws IOException {
  DistributedFileSystem fs = cluster.getFileSystem();
  Path rootdir = new Path("/");
  AclEntry e1 = new AclEntry.Builder().setName("foo").setPermission(ALL)
      .setScope(ACCESS).setType(GROUP).build();
  AclEntry e2 = new AclEntry.Builder().setName("bar").setPermission(READ)
      .setScope(ACCESS).setType(GROUP).build();
  fs.modifyAclEntries(rootdir, Lists.newArrayList(e1, e2));
  AclStatus s = cluster.getNamesystem().getAclStatus(rootdir.toString());
  AclEntry[] returned =
      Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "bar", READ),
      aclEntry(ACCESS, GROUP, "foo", ALL) }, returned);
  // restart, hence save and load from fsimage
  restart(fs, true);
  s = cluster.getNamesystem().getAclStatus(rootdir.toString());
  returned = Lists.newArrayList(s.getEntries()).toArray(new AclEntry[0]);
  Assert.assertArrayEquals(new AclEntry[] {
      aclEntry(ACCESS, GROUP, READ_EXECUTE),
      aclEntry(ACCESS, GROUP, "bar", READ),
      aclEntry(ACCESS, GROUP, "foo", ALL) }, returned);
}
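The ACL test relies on modifyAclEntries merging the new group entries into the root directory's ACL and on getAclStatus returning the extended entries unchanged after the fsimage round trip. A self-contained sketch of that public API is shown below; the class name and paths are invented for the example, and ACL support is assumed to be switched on via dfs.namenode.acls.enabled.

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclEntryScope;
import org.apache.hadoop.fs.permission.AclEntryType;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class AclSketch {
  public static void main(String[] args) throws Exception {
    // ACLs must be enabled on the NameNode for these calls to succeed.
    HdfsConfiguration conf = new HdfsConfiguration();
    conf.setBoolean("dfs.namenode.acls.enabled", true);
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path dir = new Path("/acl_dir");
      fs.mkdirs(dir);
      List<AclEntry> entries = Arrays.asList(
          new AclEntry.Builder().setScope(AclEntryScope.ACCESS)
              .setType(AclEntryType.GROUP).setName("foo")
              .setPermission(FsAction.ALL).build());
      fs.modifyAclEntries(dir, entries);
      AclStatus status = fs.getAclStatus(dir);
      // Prints the extended entries, e.g. the unnamed group entry and group:foo:rwx.
      status.getEntries().forEach(e -> System.out.println(e));
    } finally {
      cluster.shutdown();
    }
  }
}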
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache: class TestFsck, method testFsckCorruptECFile.
@Test(timeout = 300000)
public void testFsckCorruptECFile() throws Exception {
  DistributedFileSystem fs = null;
  int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
  int parityBlocks =
      StripedFileTestUtil.getDefaultECPolicy().getNumParityUnits();
  int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
  int totalSize = dataBlocks + parityBlocks;
  conf.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
      StripedFileTestUtil.getDefaultECPolicy().getName());
  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(totalSize).build();
  fs = cluster.getFileSystem();
  Map<Integer, Integer> dnIndices = new HashMap<>();
  ArrayList<DataNode> dnList = cluster.getDataNodes();
  for (int i = 0; i < totalSize; i++) {
    dnIndices.put(dnList.get(i).getIpcPort(), i);
  }
  // create file
  Path ecDirPath = new Path("/striped");
  fs.mkdir(ecDirPath, FsPermission.getDirDefault());
  fs.getClient().setErasureCodingPolicy(ecDirPath.toString(),
      StripedFileTestUtil.getDefaultECPolicy().getName());
  Path file = new Path(ecDirPath, "corrupted");
  final int length = cellSize * dataBlocks;
  final byte[] bytes = StripedFileTestUtil.generateBytes(length);
  DFSTestUtil.writeFile(fs, file, bytes);
  LocatedStripedBlock lsb = (LocatedStripedBlock) fs.getClient()
      .getLocatedBlocks(file.toString(), 0, cellSize * dataBlocks).get(0);
  final LocatedBlock[] blks = StripedBlockUtil.parseStripedBlockGroup(lsb,
      cellSize, dataBlocks, parityBlocks);
  // make an unrecoverable ec file with corrupted blocks
  for (int i = 0; i < parityBlocks + 1; i++) {
    int ipcPort = blks[i].getLocations()[0].getIpcPort();
    int dnIndex = dnIndices.get(ipcPort);
    File storageDir = cluster.getInstanceStorageDir(dnIndex, 0);
    File blkFile = MiniDFSCluster.getBlockFile(storageDir, blks[i].getBlock());
    Assert.assertTrue("Block file does not exist", blkFile.exists());
    FileOutputStream out = new FileOutputStream(blkFile);
    out.write("corruption".getBytes());
  }
  // disable heartbeats from the DataNodes so that the corrupted block record
  // is kept in the NameNode
  for (DataNode dn : cluster.getDataNodes()) {
    DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, true);
  }
  // Read the file to trigger reportBadBlocks
  try {
    IOUtils.copyBytes(fs.open(file), new IOUtils.NullOutputStream(), conf,
        true);
  } catch (IOException ie) {
    assertTrue(ie.getMessage().contains(
        "missingChunksNum=" + (parityBlocks + 1)));
  }
  waitForUnrecoverableBlockGroup(conf);
  String outStr = runFsck(conf, 1, true, "/");
  assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
  assertTrue(outStr.contains("Under-erasure-coded block groups:\t0"));
  outStr = runFsck(conf, -1, true, "/", "-list-corruptfileblocks");
  assertTrue(outStr.contains("has 1 CORRUPT files"));
}
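runFsck and waitForUnrecoverableBlockGroup are helpers local to TestFsck and their bodies are not shown here. As a rough sketch of how fsck can be driven programmatically in the same spirit, the DFSck tool can be run through ToolRunner with its output captured; the helper below is an assumption-labelled example under those caveats, not the test's actual implementation.

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.tools.DFSck;
import org.apache.hadoop.util.ToolRunner;

public class FsckSketch {
  /** Runs "fsck <path> -list-corruptfileblocks" and returns its text output. */
  public static String runFsck(Configuration conf, String path) throws Exception {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    PrintStream out = new PrintStream(bytes, true);
    // DFSck writes its report to the supplied stream instead of System.out.
    int exitCode = ToolRunner.run(new DFSck(conf, out),
        new String[] { path, "-list-corruptfileblocks" });
    out.flush();
    return "exit=" + exitCode + "\n" + bytes.toString();
  }
}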
Use of org.apache.hadoop.hdfs.DistributedFileSystem in project hadoop by apache: class TestFSImageWithXAttr, method testXAttr.
private void testXAttr(boolean persistNamespace) throws IOException {
  Path path = new Path("/p");
  DistributedFileSystem fs = cluster.getFileSystem();
  fs.create(path).close();
  fs.setXAttr(path, name1, value1, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name2, value2, EnumSet.of(XAttrSetFlag.CREATE));
  fs.setXAttr(path, name3, null, EnumSet.of(XAttrSetFlag.CREATE));
  restart(fs, persistNamespace);
  Map<String, byte[]> xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 3);
  Assert.assertArrayEquals(value1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  Assert.assertArrayEquals(value3, xattrs.get(name3));
  fs.setXAttr(path, name1, newValue1, EnumSet.of(XAttrSetFlag.REPLACE));
  restart(fs, persistNamespace);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 3);
  Assert.assertArrayEquals(newValue1, xattrs.get(name1));
  Assert.assertArrayEquals(value2, xattrs.get(name2));
  Assert.assertArrayEquals(value3, xattrs.get(name3));
  fs.removeXAttr(path, name1);
  fs.removeXAttr(path, name2);
  fs.removeXAttr(path, name3);
  restart(fs, persistNamespace);
  xattrs = fs.getXAttrs(path);
  Assert.assertEquals(xattrs.size(), 0);
}
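The xattr test cycles values through the CREATE and REPLACE flags and checks that they survive a NameNode restart. The sketch below shows the same DistributedFileSystem xattr calls in isolation; the class name, attribute name, and values are illustrative, and note that xattr names must carry a namespace prefix such as "user.".

import java.util.EnumSet;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class XAttrSketch {
  public static void main(String[] args) throws Exception {
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(1).build();
    try {
      cluster.waitActive();
      DistributedFileSystem fs = cluster.getFileSystem();
      Path p = new Path("/xattr_file");
      fs.create(p).close();
      // CREATE adds a new attribute; using it on an existing name would fail.
      fs.setXAttr(p, "user.owner-tag", "alice".getBytes(),
          EnumSet.of(XAttrSetFlag.CREATE));
      // REPLACE overwrites the existing value.
      fs.setXAttr(p, "user.owner-tag", "bob".getBytes(),
          EnumSet.of(XAttrSetFlag.REPLACE));
      Map<String, byte[]> xattrs = fs.getXAttrs(p);
      System.out.println("user.owner-tag = "
          + new String(xattrs.get("user.owner-tag")));
      fs.removeXAttr(p, "user.owner-tag");
    } finally {
      cluster.shutdown();
    }
  }
}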