Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
The class TestSnapshotBlocksMap, method testDeletionWithSnapshots.
/**
* Test deleting a file with snapshots. Need to check the blocksMap to make
* sure the corresponding record is updated correctly.
*/
@Test(timeout = 60000)
public void testDeletionWithSnapshots() throws Exception {
  Path file0 = new Path(sub1, "file0");
  Path file1 = new Path(sub1, "file1");
  Path sub2 = new Path(sub1, "sub2");
  Path file2 = new Path(sub2, "file2");
  Path file3 = new Path(sub1, "file3");
  Path file4 = new Path(sub1, "file4");
  Path file5 = new Path(sub1, "file5");
  // Create files under sub1
  DFSTestUtil.createFile(hdfs, file0, 4 * BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file1, 2 * BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, file2, 3 * BLOCKSIZE, REPLICATION, seed);
  // Normal deletion
  {
    final INodeFile f2 = assertBlockCollection(file2.toString(), 3, fsdir, blockmanager);
    BlockInfo[] blocks = f2.getBlocks();
    hdfs.delete(sub2, true);
    // The INode should have been removed from the blocksMap
    for (BlockInfo b : blocks) {
      assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
    }
  }
  // Create snapshots for sub1
  final String[] snapshots = { "s0", "s1", "s2" };
  DFSTestUtil.createFile(hdfs, file3, 5 * BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[0]);
  DFSTestUtil.createFile(hdfs, file4, 1 * BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[1]);
  DFSTestUtil.createFile(hdfs, file5, 7 * BLOCKSIZE, REPLICATION, seed);
  SnapshotTestHelper.createSnapshot(hdfs, sub1, snapshots[2]);
  // Set the replication so that the inode should be replaced for snapshots
  {
    INodeFile f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
    Assert.assertSame(INodeFile.class, f1.getClass());
    hdfs.setReplication(file1, (short) 2);
    f1 = assertBlockCollection(file1.toString(), 2, fsdir, blockmanager);
    assertTrue(f1.isWithSnapshot());
    assertFalse(f1.isUnderConstruction());
  }
  // Check the block information for file0
  final INodeFile f0 = assertBlockCollection(file0.toString(), 4, fsdir, blockmanager);
  BlockInfo[] blocks0 = f0.getBlocks();
  // Also check the block information for the snapshot copy of file0
  Path snapshotFile0 = SnapshotTestHelper.getSnapshotPath(sub1, "s0", file0.getName());
  assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
  // Delete file0
  hdfs.delete(file0, true);
  // Make sure the blocks of file0 are still in the blocksMap
  for (BlockInfo b : blocks0) {
    assertNotEquals(INVALID_INODE_ID, b.getBlockCollectionId());
  }
  assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
  // Compare the INode in the blocksMap with INodes for snapshots
  String s1f0 = SnapshotTestHelper.getSnapshotPath(sub1, "s1", file0.getName()).toString();
  assertBlockCollection(s1f0, 4, fsdir, blockmanager);
  // Delete snapshot s1
  hdfs.deleteSnapshot(sub1, "s1");
  // Make sure the blocks of file0 are still in the blocksMap
  for (BlockInfo b : blocks0) {
    assertNotEquals(INVALID_INODE_ID, b.getBlockCollectionId());
  }
  assertBlockCollection(snapshotFile0.toString(), 4, fsdir, blockmanager);
  try {
    INodeFile.valueOf(fsdir.getINode(s1f0), s1f0);
    fail("Expect FileNotFoundException when identifying the INode in a deleted Snapshot");
  } catch (IOException e) {
    assertExceptionContains("File does not exist: " + s1f0, e);
  }
}
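The test above ties each path back to the blocksMap through the assertBlockCollection helper of TestSnapshotBlocksMap. Below is a minimal sketch of the kind of check such a helper is expected to perform; the name checkBlockCollection and its body are illustrative, not the actual Hadoop helper, and the usual JUnit static imports are assumed.

static INodeFile checkBlockCollection(String path, int numBlocks,
    FSDirectory fsdir, BlockManager blkManager) throws Exception {
  // Resolve the path to an INodeFile and verify the expected block count.
  final INodeFile file = INodeFile.valueOf(fsdir.getINode(path), path);
  assertEquals(numBlocks, file.getBlocks().length);
  for (BlockInfo b : file.getBlocks()) {
    // Each block stored in the BlockManager's blocksMap should still point
    // back to this file's INode id.
    assertEquals(file.getId(), blkManager.getStoredBlock(b).getBlockCollectionId());
  }
  return file;
}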
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
The class TestSnapshotDeletion, method testDeleteEarliestSnapshot2.
/**
* Test deleting the earliest (first) snapshot. In this more complicated
* scenario, the snapshots are taken across directories.
* <pre>
* The test covers the following scenarios:
* 1. delete the first diff in the diff list of a directory
* 2. delete the first diff in the diff list of a file
* </pre>
* Also, the recursive cleanTree process should cover both INodeFile and
* INodeDirectory.
*/
@Test(timeout = 300000)
public void testDeleteEarliestSnapshot2() throws Exception {
  Path noChangeDir = new Path(sub, "noChangeDir");
  Path noChangeFile = new Path(noChangeDir, "noChangeFile");
  Path metaChangeFile = new Path(noChangeDir, "metaChangeFile");
  Path metaChangeDir = new Path(noChangeDir, "metaChangeDir");
  Path toDeleteFile = new Path(metaChangeDir, "toDeleteFile");
  DFSTestUtil.createFile(hdfs, noChangeFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, metaChangeFile, BLOCKSIZE, REPLICATION, seed);
  DFSTestUtil.createFile(hdfs, toDeleteFile, BLOCKSIZE, REPLICATION, seed);
  final INodeFile toDeleteFileNode = TestSnapshotBlocksMap.assertBlockCollection(
      toDeleteFile.toString(), 1, fsdir, blockmanager);
  BlockInfo[] blocks = toDeleteFileNode.getBlocks();
  // create snapshot s0 on dir
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
  // delete /TestSnapshot/sub/noChangeDir/metaChangeDir/toDeleteFile
  hdfs.delete(toDeleteFile, true);
  // the deletion adds a diff for toDeleteFile and metaChangeDir
  checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
  // change metadata of /TestSnapshot/sub/noChangeDir/metaChangeDir and
  // /TestSnapshot/sub/noChangeDir/metaChangeFile
  hdfs.setReplication(metaChangeFile, REPLICATION_1);
  hdfs.setOwner(metaChangeDir, "unknown", "unknown");
  checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
  // create snapshot s1 on dir
  hdfs.createSnapshot(dir, "s1");
  checkQuotaUsageComputation(dir, 7, 3 * BLOCKSIZE * REPLICATION);
  // delete snapshot s0
  hdfs.deleteSnapshot(dir, "s0");
  // namespace: remove toDeleteFile and its diff, metaChangeFile's diff,
  // metaChangeDir's diff, and dir's diff. diskspace: remove toDeleteFile,
  // and metaChangeFile's replication factor decreases
  checkQuotaUsageComputation(dir, 6, 2 * BLOCKSIZE * REPLICATION - BLOCKSIZE);
  for (BlockInfo b : blocks) {
    assertEquals(INVALID_INODE_ID, b.getBlockCollectionId());
  }
  // check 1: there is no snapshot s0
  final INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
  Snapshot snapshot0 = dirNode.getSnapshot(DFSUtil.string2Bytes("s0"));
  assertNull(snapshot0);
  Snapshot snapshot1 = dirNode.getSnapshot(DFSUtil.string2Bytes("s1"));
  DirectoryDiffList diffList = dirNode.getDiffs();
  assertEquals(1, diffList.asList().size());
  assertEquals(snapshot1.getId(), diffList.getLast().getSnapshotId());
  diffList = fsdir.getINode(metaChangeDir.toString()).asDirectory().getDiffs();
  assertEquals(0, diffList.asList().size());
  // check 2: noChangeDir and noChangeFile are still there
  final INodeDirectory noChangeDirNode = (INodeDirectory) fsdir.getINode(noChangeDir.toString());
  assertEquals(INodeDirectory.class, noChangeDirNode.getClass());
  final INodeFile noChangeFileNode = (INodeFile) fsdir.getINode(noChangeFile.toString());
  assertEquals(INodeFile.class, noChangeFileNode.getClass());
  TestSnapshotBlocksMap.assertBlockCollection(noChangeFile.toString(), 1, fsdir, blockmanager);
  // check 3: current metadata of metaChangeFile and metaChangeDir
  FileStatus status = hdfs.getFileStatus(metaChangeDir);
  assertEquals("unknown", status.getOwner());
  assertEquals("unknown", status.getGroup());
  status = hdfs.getFileStatus(metaChangeFile);
  assertEquals(REPLICATION_1, status.getReplication());
  TestSnapshotBlocksMap.assertBlockCollection(metaChangeFile.toString(), 1, fsdir, blockmanager);
  // check 4: no snapshot copy for toDeleteFile
  try {
    hdfs.getFileStatus(toDeleteFile);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFile.toString(), e);
  }
  final Path toDeleteFileInSnapshot = SnapshotTestHelper.getSnapshotPath(dir, "s0",
      toDeleteFile.toString().substring(dir.toString().length()));
  try {
    hdfs.getFileStatus(toDeleteFileInSnapshot);
    fail("should throw FileNotFoundException");
  } catch (FileNotFoundException e) {
    GenericTestUtils.assertExceptionContains("File does not exist: " + toDeleteFileInSnapshot.toString(), e);
  }
}
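The quota assertions above go through TestSnapshotDeletion's checkQuotaUsageComputation helper. The following is a rough sketch of what such a check amounts to, assuming the fsdir field of the test class; the helper name checkQuotaUsage and the exact computeQuotaUsage overload are assumptions rather than the actual implementation.

private void checkQuotaUsage(Path dirPath, long expectedNamespace,
    long expectedDiskspace) throws IOException {
  // Recompute the quota usage of the whole subtree, including inodes and
  // blocks retained only by snapshots, and compare against the expectation.
  final INodeDirectory dirNode = fsdir.getINode(dirPath.toString()).asDirectory();
  QuotaCounts counts = dirNode.computeQuotaUsage(fsdir.getBlockStoragePolicySuite());
  assertEquals(expectedNamespace, counts.getNameSpace());
  assertEquals(expectedDiskspace, counts.getStorageSpace());
}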
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
The class TestSnapshotReplication, method checkSnapshotFileReplication.
/**
* Check the replication for both the current file and all its prior snapshots
*
* @param currentFile
* the Path of the current file
* @param snapshotRepMap
* A map maintaining all the snapshots of the current file, as well
* as their expected replication number stored in their corresponding
* INodes
* @param expectedBlockRep
* The expected replication number
* @throws Exception
*/
private void checkSnapshotFileReplication(Path currentFile,
    Map<Path, Short> snapshotRepMap, short expectedBlockRep) throws Exception {
  // First check the replication recorded in the blocks of the currentFile's INode
  final INodeFile inodeOfCurrentFile = getINodeFile(currentFile);
  for (BlockInfo b : inodeOfCurrentFile.getBlocks()) {
    assertEquals(expectedBlockRep, b.getReplication());
  }
  // Then check the replication for every snapshot
  for (Path ss : snapshotRepMap.keySet()) {
    final INodesInPath iip = fsdir.getINodesInPath(ss.toString(), DirOp.READ);
    final INodeFile ssInode = iip.getLastINode().asFile();
    // The block-level replication should always equal expectedBlockRep
    for (BlockInfo b : ssInode.getBlocks()) {
      assertEquals(expectedBlockRep, b.getReplication());
    }
    // Also check the number derived from INodeFile#getFileReplication
    assertEquals(snapshotRepMap.get(ss).shortValue(),
        ssInode.getFileReplication(iip.getPathSnapshotId()));
  }
}
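An illustrative call site follows; the paths, snapshot names, and replication values are assumptions rather than lines copied from TestSnapshotReplication. The point it demonstrates: the blocks of the file carry the current (highest) replication, while each snapshot INode reports the file replication recorded when that snapshot was taken (java.util.HashMap import assumed).

// Hypothetical snapshots s1 and s2 of file1, taken while its replication
// was 3 and then 4.
Map<Path, Short> snapshotRepMap = new HashMap<>();
snapshotRepMap.put(SnapshotTestHelper.getSnapshotPath(sub1, "s1", file1.getName()), (short) 3);
snapshotRepMap.put(SnapshotTestHelper.getSnapshotPath(sub1, "s2", file1.getName()), (short) 4);
// Block-level replication reflects the current setting (4), while the
// snapshot INodes still report 3 and 4 via getFileReplication.
checkSnapshotFileReplication(file1, snapshotRepMap, (short) 4);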
Use of org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo in project hadoop by apache.
The class TestOfflineImageViewerWithStripedBlocks, method testFileSize.
private void testFileSize(int numBytes) throws IOException,
    UnresolvedLinkException, SnapshotAccessControlException {
  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
  File orgFsimage = null;
  Path file = new Path("/eczone/striped");
  FSDataOutputStream out = fs.create(file, true);
  byte[] bytes = DFSTestUtil.generateSequentialBytes(0, numBytes);
  out.write(bytes);
  out.close();
  // Write results to the fsimage file
  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
  fs.saveNamespace();
  // Determine the location of the fsimage file
  orgFsimage = FSImageTestUtil.findLatestImageFile(
      FSImageTestUtil.getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
  if (orgFsimage == null) {
    throw new RuntimeException("Didn't generate or can't find fsimage");
  }
  FSImageLoader loader = FSImageLoader.load(orgFsimage.getAbsolutePath());
  String fileStatus = loader.getFileStatus("/eczone/striped");
  long expectedFileSize = bytes.length;
  // Verify the space consumed recorded in BlockInfoStriped
  FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
  INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
  assertEquals(StripedFileTestUtil.getDefaultECPolicy().getId(), fileNode.getErasureCodingPolicyID());
  assertTrue("Invalid block size", fileNode.getBlocks().length > 0);
  long actualFileSize = 0;
  for (BlockInfo blockInfo : fileNode.getBlocks()) {
    assertTrue("Didn't find striped block information", blockInfo instanceof BlockInfoStriped);
    actualFileSize += blockInfo.getNumBytes();
  }
  assertEquals("Wrongly computed size for a file with striped blocks",
      expectedFileSize, actualFileSize);
  // Verify the space consumed reported in the file status from the fsimage
  String EXPECTED_FILE_SIZE = "\"length\":" + String.valueOf(expectedFileSize);
  assertTrue("Wrongly computed size for a file with striped blocks, file status: "
      + fileStatus + ". Expected file size is: " + EXPECTED_FILE_SIZE,
      fileStatus.contains(EXPECTED_FILE_SIZE));
}
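A small usage sketch follows; the concrete sizes are assumptions, not values taken from TestOfflineImageViewerWithStripedBlocks, chosen to exercise the interesting boundaries of a striped layout.

int cellSize = StripedFileTestUtil.getDefaultECPolicy().getCellSize();
int dataBlocks = StripedFileTestUtil.getDefaultECPolicy().getNumDataUnits();
testFileSize(cellSize - 1);              // smaller than a single cell
testFileSize(cellSize);                  // exactly one cell
testFileSize(cellSize * dataBlocks);     // one full stripe across all data units
testFileSize(cellSize * dataBlocks + 1); // spills into a second stripe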