Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From the class TestSnapshotDeletion, method testCombineSnapshotDiffImpl.
/**
* Test snapshot deletion
* @param snapshotRoot The dir where the snapshots are created
* @param modDirStr The snapshotRoot itself or one of its sub-directories,
* where the modifications happen. It is represented as a relative
* path to the snapshotRoot.
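* @param dirNodeNum The number of directory inodes under snapshotRoot, used as
* the base count in the namespace quota checks below.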
*/
private void testCombineSnapshotDiffImpl(Path snapshotRoot, String modDirStr, int dirNodeNum) throws Exception {
Path modDir = modDirStr.isEmpty() ? snapshotRoot : new Path(snapshotRoot, modDirStr);
Path file10 = new Path(modDir, "file10");
Path file11 = new Path(modDir, "file11");
Path file12 = new Path(modDir, "file12");
Path file13 = new Path(modDir, "file13");
Path file14 = new Path(modDir, "file14");
Path file15 = new Path(modDir, "file15");
DFSTestUtil.createFile(hdfs, file10, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file12, BLOCKSIZE, REPLICATION_1, seed);
DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
// create snapshot s1 for snapshotRoot
SnapshotTestHelper.createSnapshot(hdfs, snapshotRoot, "s1");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 8 * BLOCKSIZE);
// delete file11
hdfs.delete(file11, true);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 8 * BLOCKSIZE);
// modify file12
hdfs.setReplication(file12, REPLICATION);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 9 * BLOCKSIZE);
// modify file13
hdfs.setReplication(file13, REPLICATION);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 4, 10 * BLOCKSIZE);
// create file14
DFSTestUtil.createFile(hdfs, file14, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 5, 13 * BLOCKSIZE);
// create file15
DFSTestUtil.createFile(hdfs, file15, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 16 * BLOCKSIZE);
// create snapshot s2 for snapshotRoot
hdfs.createSnapshot(snapshotRoot, "s2");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 16 * BLOCKSIZE);
// create file11 again: (0, d) + (c, 0)
DFSTestUtil.createFile(hdfs, file11, BLOCKSIZE, REPLICATION, seed);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// delete file12
hdfs.delete(file12, true);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// modify file13
hdfs.setReplication(file13, (short) (REPLICATION - 2));
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// delete file14: (c, 0) + (0, d)
hdfs.delete(file14, true);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// modify file15
hdfs.setReplication(file15, REPLICATION_1);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// create snapshot s3 for snapshotRoot
hdfs.createSnapshot(snapshotRoot, "s3");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 19 * BLOCKSIZE);
// modify file10, to check if the posterior diff was set correctly
hdfs.setReplication(file10, REPLICATION);
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 7, 20 * BLOCKSIZE);
Path file10_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file10");
Path file11_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file11");
Path file12_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file12");
Path file13_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file13");
Path file14_s2 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s2", modDirStr + "file14");
Path file15_s2 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s2", modDirStr + "file15");
FileStatus statusBeforeDeletion10 = hdfs.getFileStatus(file10_s1);
FileStatus statusBeforeDeletion11 = hdfs.getFileStatus(file11_s1);
FileStatus statusBeforeDeletion12 = hdfs.getFileStatus(file12_s1);
FileStatus statusBeforeDeletion13 = hdfs.getFileStatus(file13_s1);
INodeFile file14Node = TestSnapshotBlocksMap.assertBlockCollection(file14_s2.toString(), 1, fsdir, blockmanager);
BlockInfo[] blocks_14 = file14Node.getBlocks();
TestSnapshotBlocksMap.assertBlockCollection(file15_s2.toString(), 1, fsdir, blockmanager);
// delete s2, in which process we need to combine the diff in s2 to s1
hdfs.deleteSnapshot(snapshotRoot, "s2");
checkQuotaUsageComputation(snapshotRoot, dirNodeNum + 6, 14 * BLOCKSIZE);
// check the correctness of s1
FileStatus statusAfterDeletion10 = hdfs.getFileStatus(file10_s1);
FileStatus statusAfterDeletion11 = hdfs.getFileStatus(file11_s1);
FileStatus statusAfterDeletion12 = hdfs.getFileStatus(file12_s1);
FileStatus statusAfterDeletion13 = hdfs.getFileStatus(file13_s1);
assertEquals(statusBeforeDeletion10.toString(), statusAfterDeletion10.toString());
assertEquals(statusBeforeDeletion11.toString(), statusAfterDeletion11.toString());
assertEquals(statusBeforeDeletion12.toString(), statusAfterDeletion12.toString());
assertEquals(statusBeforeDeletion13.toString(), statusAfterDeletion13.toString());
TestSnapshotBlocksMap.assertBlockCollection(file10_s1.toString(), 1, fsdir, blockmanager);
TestSnapshotBlocksMap.assertBlockCollection(file11_s1.toString(), 1, fsdir, blockmanager);
TestSnapshotBlocksMap.assertBlockCollection(file12_s1.toString(), 1, fsdir, blockmanager);
TestSnapshotBlocksMap.assertBlockCollection(file13_s1.toString(), 1, fsdir, blockmanager);
// make sure file14 and file15 are not included in s1
Path file14_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file14");
Path file15_s1 = SnapshotTestHelper.getSnapshotPath(snapshotRoot, "s1", modDirStr + "file15");
assertFalse(hdfs.exists(file14_s1));
assertFalse(hdfs.exists(file15_s1));
for (BlockInfo b : blocks_14) {
assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
}
INodeFile nodeFile13 = (INodeFile) fsdir.getINode(file13.toString());
for (BlockInfo b : nodeFile13.getBlocks()) {
assertEquals(REPLICATION_1, b.getReplication());
}
TestSnapshotBlocksMap.assertBlockCollection(file13.toString(), 1, fsdir, blockmanager);
INodeFile nodeFile12 = (INodeFile) fsdir.getINode(file12_s1.toString());
for (BlockInfo b : nodeFile12.getBlocks()) {
assertEquals(REPLICATION_1, b.getReplication());
}
}
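The checkQuotaUsageComputation calls above assert, after every mutation, the expected namespace usage of snapshotRoot (number of inodes, hence the dirNodeNum + N terms) and its diskspace usage (bytes multiplied by replication, hence the N * BLOCKSIZE terms). The helper itself is not part of this excerpt; the following is only a rough, hypothetical stand-in built on the public ContentSummary API to make those assertions easier to read. It assumes the hdfs field used above and an import of org.apache.hadoop.fs.ContentSummary; the actual helper in TestSnapshotDeletion computes these values on the namenode's INode tree and may count snapshot copies differently.
// Hypothetical, simplified stand-in for checkQuotaUsageComputation. It is NOT
// the helper used by TestSnapshotDeletion; it only approximates the same idea
// through the public FileSystem API.
private void checkQuotaUsageApprox(Path root, long expectedNamespace, long expectedDiskspace) throws IOException {
    ContentSummary cs = hdfs.getContentSummary(root);
    // Namespace usage: every file and directory inode counts as one.
    assertEquals(expectedNamespace, cs.getFileCount() + cs.getDirectoryCount());
    // Diskspace usage: bytes consumed, with replication taken into account.
    assertEquals(expectedDiskspace, cs.getSpaceConsumed());
}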
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From the class TestSnapshotReplication, method testReplicationAfterDeletion.
/**
* Test replication for a file with snapshots, also including the scenario
* where the original file is deleted
*/
@Test(timeout = 60000)
public void testReplicationAfterDeletion() throws Exception {
// Create file1, set its replication to 3
DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
// Take 3 snapshots of sub1
for (int i = 1; i <= 3; i++) {
Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s" + i);
Path ssFile = new Path(root, file1.getName());
snapshotRepMap.put(ssFile, REPLICATION);
}
// Check replication
checkFileReplication(file1, REPLICATION, REPLICATION);
checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);
// Delete file1
hdfs.delete(file1, true);
// Check replication of snapshots
for (Path ss : snapshotRepMap.keySet()) {
final INodeFile ssInode = getINodeFile(ss);
// The replication recorded on each block should still be REPLICATION
for (BlockInfo b : ssInode.getBlocks()) {
assertEquals(REPLICATION, b.getReplication());
}
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(), ssInode.getFileReplication());
}
}
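getINodeFile and checkFileReplication are helpers defined elsewhere in TestSnapshotReplication and are not part of this excerpt. The following is a minimal sketch of what they might look like, assuming they resolve the path through the test's fsdir field and compare both the client-visible replication and the per-block replication; the real helpers may differ in detail.
// Hypothetical reconstructions of the helpers referenced above; names and
// exact behavior are assumptions based on how they are called in the test.
private INodeFile getINodeFile(Path p) throws IOException {
    final String path = p.toString();
    return INodeFile.valueOf(fsdir.getINode(path), path);
}

private void checkFileReplication(Path file, short replication, short blockReplication) throws Exception {
    // Replication as reported through the client API.
    assertEquals(replication, hdfs.getFileStatus(file).getReplication());
    // Replication recorded on each block in the blocks map.
    for (BlockInfo b : getINodeFile(file).getBlocks()) {
        assertEquals(blockReplication, b.getReplication());
    }
}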
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From the class TestSnapshotBlocksMap, method testDeletionWithSnapshots.
/**
* Test deleting a file with snapshots. Need to check the blocksMap to make
* sure the corresponding record is updated correctly.
*/
@Test(timeout = 60000)
public void testDeletionWithSnapshots() throws Exception {
Path file0 = new Path(sub1, "file0");
Path file1 = new Path(sub1, "file1");
Path sub2 = new Path(sub1, "sub2");
Path file2 = new Path(sub2, "file2");
Path file3 = new Path(sub1, "file3");
Path file4 = new Path(sub1, "file4");
Path file5 = new Path(sub1, "file5");
// Create files under sub1
DFSTestUtil.createFile(hdfs, file0, 4 * BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file1, 2 * BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, file2, 3 * BLOCKSIZE, REPLICATION, seed);
// Normal deletion
{
final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir, blockmanager);
BlockInfo[] blocks = f2.getBlocks();
hdfs.delete(sub2, true);
// The INode should have been removed from the blocksMap
for (BlockInfo b : blocks) {
assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
}
}
// Create snapshots for sub1
final String[] snapshots = { "s0", "s1", "s2" };
DFSTestUtil.createFile(hdfs, file3, 5 * BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
DFSTestUtil.createFile(hdfs, file4, 1 * BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
DFSTestUtil.createFile(hdfs, file5, 7 * BLOCKSIZE, REPLICATION, seed);
SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
// change the replication so that the file inode gains the snapshot feature (isWithSnapshot)
{
INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
Assert.assertSame(INodeFile.class, f1.getClass());
hdfs.setReplication(file1, (short) 2);
f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
assertTrue(f1.isWithSnapshot());
assertFalse(f1.isUnderConstruction());
}
// Check the block information for file0
final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir, blockmanager);
BlockInfo[] blocks0 = f0.getBlocks();
// Also check the block information for snapshot of file0
Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0", file0.getName());
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
// Delete file0
hdfs.delete(file0, true);
// Make sure the blocks of file0 are still in the blocksMap
for (BlockInfo b : blocks0) {
assertNotEquals(INVALID_INODE_ID, b.getBlockCollectionId());
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
// Compare the INode in the blocksMap with INodes for snapshots
String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1", file0.getName()).toString();
assertBlockCollection(s1f0, 4, fsdir, blockmanager);
// Delete snapshot s1
hdfs.deleteSnapshot(sub1, "s1");
// Make sure the blocks of file0 are still in the blocksMap after deleting snapshot s1
for (BlockInfo b : blocks0) {
assertNotEquals(INVALID_INODE_ID, b.getBlockCollectionId());
}
assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
try {
INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
} catch (IOException e) {
assertExceptionContains("File does not exist: " + s1f0, e);
}
}
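assertBlockCollection, used throughout these tests, ties a file path to the BlockManager's blocks map. The following is a hedged sketch of what the helper is expected to verify, not necessarily its exact implementation in TestSnapshotBlocksMap: it resolves the path to an INodeFile, checks the number of blocks, and checks that every block stored in the BlockManager points back to that inode.
// Hypothetical sketch of the assertBlockCollection helper; the real method in
// TestSnapshotBlocksMap may differ in detail.
static INodeFile assertBlockCollection(String path, int numBlocks, FSDirectory dir, BlockManager blkManager) throws Exception {
    final INodeFile file = INodeFile.valueOf(dir.getINode(path), path);
    // The file should expose the expected number of blocks ...
    assertEquals(numBlocks, file.getBlocks().length);
    // ... and each block stored in the blocks map should point back to this inode.
    for (BlockInfo b : file.getBlocks()) {
        assertEquals(file.getId(), blkManager.getStoredBlock(b).getBlockCollectionId());
    }
    return file;
}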
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From the class TestSnapshotDeletion, method testDeleteEarliestSnapshot2.
/**
* Test deleting the earliest (first) snapshot. In this more complicated
* scenario, the snapshots are taken across directories.
* <pre>
* The test covers the following scenarios:
* 1. delete the first diff in the diff list of a directory
* 2. delete the first diff in the diff list of a file
* </pre>
* Also, the recursive cleanTree process should cover both INodeFile and
* INodeDirectory.
*/
@Test(timeout = 300000)
public void testDeleteEarliestSnapshot2() throws Exception {
Path noChangeDir = new Path(sub, "noChangeDir");
Path noChangeFile = new Path(noChangeDir, "noChangeFile");
Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
final INodeFile toDeleteFileNode = TestSnapshotBlocksMap.assertBlockCollection(toDeleteFile.toString(), 1, fsdir, blockmanager);
BlockInfo[] blocks = toDeleteFileNode.getBlocks();
// create snapshot s0 on dir
SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
hdfs.delete(toDeleteFile, true);
// the deletion adds diff of toDeleteFile and metaChangeDir
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
// /TestSnapshot/sub/noChangeDir/metaChangeFile
hdfs.setReplication(metaChangeFile, REPLICATION_1);
hdfs.setOwner(metaChangeDir, "unknown", "unknown");
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// create snapshot s1 on dir
hdfs.createSnapshot(dir, "s1");
checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
// delete snapshot s0
hdfs.deleteSnapshot(dir, "s0");
// namespace: remove toDeleteFile and its diff, metaChangeFile's diff,
// metaChangeDir's diff, dir's diff. diskspace: remove toDeleteFile, and
// metaChangeFile's replication factor decreases
checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
for (BlockInfo b : blocks) {
assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
}
// check 1. there is no snapshot s0
final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
assertNull(snapshot0);
Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
DirectoryDiffList diffList = dirNode.getDiffs();
assertEquals(1, diffList.asList().size());
assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
assertEquals(0, diffList.asList().size());
// check 2. noChangeDir and noChangeFile are still there
final INodeDirectory noChangeDirNode = (INodeDirectory) fsdir.getINode(noChangeDir.toString());
assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
final INodeFile noChangeFileNode = (INodeFile) fsdir.getINode(noChangeFile.toString());
assertEquals(INodeFile.class, noChangeFileNode.getClass());
TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1, fsdir, blockmanager);
// check 3: current metadata of metaChangeFile and metaChangeDir
FileStatus status = hdfs.getFileStatus(metaChangeDir);
assertEquals("unknown", status.getOwner());
assertEquals("unknown", status.getGroup());
status = hdfs.getFileStatus(metaChangeFile);
assertEquals(REPLICATION_1, status.getReplication());
TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1, fsdir, blockmanager);
// check 4: no snapshot copy for toDeleteFile
try {
hdfs.getFileStatus(toDeleteFile);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(), e);
}
final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir, "s0", toDeleteFile.toString().substring(dir.toString().length()));
try {
hdfs.getFileStatus(toDeleteFileInSnapshot);
fail("should throw FileNotFoundException");
} catch (FileNotFoundException e) {
GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(), e);
}
}
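SnapshotTestHelper.getSnapshotPath, used above to address files inside snapshots s0 and s1, follows the standard HDFS snapshot path layout: snapshot copies are read through the ".snapshot" directory of the snapshottable root. A minimal sketch of an equivalent helper, assuming that layout (the helper in SnapshotTestHelper may be implemented differently):
// Hypothetical equivalent of SnapshotTestHelper.getSnapshotPath(); snapshot
// copies are addressed as <snapshotRoot>/.snapshot/<snapshotName>/<relativePath>.
static Path getSnapshotPath(Path snapshotRoot, String snapshotName, String relativePath) {
    return new Path(snapshotRoot, HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName + Path.SEPARATOR + relativePath);
}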
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
From the class TestSnapshotReplication, method checkSnapshotFileReplication.
/**
* Check the replication for both the current file and all its prior snapshots
*
* @param currentFile
* the Path of the current file
* @param snapshotRepMap
* A map maintaining all the snapshots of the current file, as well
* as their expected replication number stored in their corresponding
* INodes
* @param expectedBlockRep
* The expected replication number
* @throws Exception
*/
private void checkSnapshotFileReplication(Path currentFile, Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
// First check the replication recorded on each block of the currentFile
final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
for (BlockInfo b : inodeOfCurrentFile.getBlocks()) {
assertEquals(expectedBlockRep, b.getReplication());
}
// Then check replication for every snapshot
for (Path ss : snapshotRepMap.keySet()) {
final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), DirOp.READ);
final INodeFile ssInode = iip.getLastINode().asFile();
// always == expectedBlockRep
for (BlockInfo b : ssInode.getBlocks()) {
assertEquals(expectedBlockRep, b.getReplication());
}
// Also check the number derived from INodeFile#getFileReplication
assertEquals(snapshotRepMap.get(ss).shortValue(), ssInode.getFileReplication(iip.getPathSnapshotId()));
}
}
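A minimal call site for this helper, mirroring how testReplicationAfterDeletion above builds the map of snapshot paths to expected replication before invoking it:
// Usage mirroring testReplicationAfterDeletion above: take a snapshot, record
// the expected replication of the snapshot copy, then verify.
Map<Path, Short> snapshotRepMap = new HashMap<Path, Short>();
Path root = SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1");
snapshotRepMap.put(new Path(root, file1.getName()), REPLICATION);
checkSnapshotFileReplication(file1, snapshotRepMap, REPLICATION);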