Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
From the class TestFileLimit, method testMaxBlocksPerFileLimit:
@Test(timeout = 60000)
public void testMaxBlocksPerFileLimit() throws Exception {
  Configuration conf = new HdfsConfiguration();
  // Make a small block size and a low limit
  final long blockSize = 4096;
  final long numBlocks = 2;
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
  conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, numBlocks);
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  FileSystem fs = cluster.getFileSystem();
  HdfsDataOutputStream fout =
      (HdfsDataOutputStream) fs.create(new Path("/testmaxfilelimit"));
  try {
    // Write maximum number of blocks
    fout.write(new byte[(int) blockSize * (int) numBlocks]);
    fout.hflush();
    // Try to write one more block
    try {
      fout.write(new byte[1]);
      fout.hflush();
      assert false : "Expected IOException after writing too many blocks";
    } catch (IOException e) {
      GenericTestUtils.assertExceptionContains(
          "File has reached the limit" + " on maximum number of", e);
    }
  } finally {
    cluster.shutdown();
  }
}
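The cast in the test above works because DistributedFileSystem#create returns an HdfsDataOutputStream, which adds the hsync(EnumSet<SyncFlag>) overload used by the snapshot tests below. A minimal sketch of that pattern, assuming a FileSystem obtained from a running (Mini)DFS cluster and an illustrative path of our own choosing:

import java.io.IOException;
import java.util.EnumSet;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

// Hypothetical helper illustrating the write/hsync pattern; the method name,
// the 'fs' parameter, and the "/example" path are assumptions for this sketch.
static void writeAndSync(FileSystem fs) throws IOException {
  HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(new Path("/example"));
  out.write(new byte[1024]);
  // UPDATE_LENGTH persists the written bytes on the DataNodes and also updates
  // the file length on the NameNode, so getFileStatus() reflects the new size
  // even before the stream is closed.
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  out.close();
}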
Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
From the class TestINodeFileUnderConstructionWithSnapshot, method testGetBlockLocations:
/**
 * Call DFSClient#callGetBlockLocations(...) for a snapshot file. Make sure
 * only blocks within the size range are returned.
 */
@Test
public void testGetBlockLocations() throws Exception {
  final Path root = new Path("/");
  final Path file = new Path("/file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  // take a snapshot on root
  SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
  final Path fileInSnapshot = SnapshotTestHelper.getSnapshotPath(root, "s1",
      file.getName());
  FileStatus status = hdfs.getFileStatus(fileInSnapshot);
  // make sure we record the size for the file
  assertEquals(BLOCKSIZE, status.getLen());
  // append data to file
  DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE - 1);
  status = hdfs.getFileStatus(fileInSnapshot);
  // the size of the snapshot file should still be BLOCKSIZE
  assertEquals(BLOCKSIZE, status.getLen());
  // the size of the file should be (2 * BLOCKSIZE - 1)
  status = hdfs.getFileStatus(file);
  assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
  // call DFSClient#callGetBlockLocations for the file in snapshot
  LocatedBlocks blocks = DFSClientAdapter.callGetBlockLocations(
      cluster.getNameNodeRpc(), fileInSnapshot.toString(), 0, Long.MAX_VALUE);
  List<LocatedBlock> blockList = blocks.getLocatedBlocks();
  // should be only one block
  assertEquals(BLOCKSIZE, blocks.getFileLength());
  assertEquals(1, blockList.size());
  // check the last block
  LocatedBlock lastBlock = blocks.getLastLocatedBlock();
  assertEquals(0, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
  // take another snapshot
  SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
  final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root, "s2",
      file.getName());
  // append data to file without closing
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  status = hdfs.getFileStatus(fileInSnapshot2);
  // the size of the snapshot file should be BLOCKSIZE*2-1
  assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
  // the size of the file should be (3 * BLOCKSIZE - 1)
  status = hdfs.getFileStatus(file);
  assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
  assertFalse(blocks.isUnderConstruction());
  assertTrue(blocks.isLastBlockComplete());
  blockList = blocks.getLocatedBlocks();
  // should be 2 blocks
  assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
  assertEquals(2, blockList.size());
  // check the last block
  lastBlock = blocks.getLastLocatedBlock();
  assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      fileInSnapshot2.toString(), BLOCKSIZE, 0);
  blockList = blocks.getLocatedBlocks();
  assertEquals(1, blockList.size());
  // check blocks for the file being written
  blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(),
      file.toString(), 0, Long.MAX_VALUE);
  blockList = blocks.getLocatedBlocks();
  assertEquals(3, blockList.size());
  assertTrue(blocks.isUnderConstruction());
  assertFalse(blocks.isLastBlockComplete());
  lastBlock = blocks.getLastLocatedBlock();
  assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
  assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
  out.close();
}
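The helper appendFileWithoutClosing(...) called above is defined in the test class but not included in this excerpt. A minimal sketch of what it has to do, assuming the shared hdfs DistributedFileSystem field used elsewhere in these tests: append length random bytes and hand back the still-open stream so the file's last block stays under construction.

// Hypothetical reconstruction of the helper referenced above; the exact
// implementation lives in TestINodeFileUnderConstructionWithSnapshot.
private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
    throws IOException {
  byte[] toAppend = new byte[length];
  new Random().nextBytes(toAppend);
  // DistributedFileSystem#append also returns an HdfsDataOutputStream
  HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
  out.write(toAppend);
  // Deliberately not closed: the caller hsyncs, takes snapshots, and closes
  // later, so the file remains under construction in the meantime.
  return out;
}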
Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.
From the class TestINodeFileUnderConstructionWithSnapshot, method testSnapshotWhileAppending:
/**
 * Test snapshot during file appending, before the corresponding
 * {@link FSDataOutputStream} instance closes.
 */
@Test(timeout = 60000)
public void testSnapshotWhileAppending() throws Exception {
  Path file = new Path(dir, "file");
  DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
  // 1. append without closing stream --> create snapshot
  HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
  out.close();
  // check: an INodeFileUnderConstructionWithSnapshot should be stored into
  // s0's deleted list, with size BLOCKSIZE*2
  INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
  INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
  DirectoryDiff last = dirNode.getDiffs().getLast();
  // 2. append without closing stream
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
  // re-check nodeInDeleted_S0
  dirNode = fsdir.getINode(dir.toString()).asDirectory();
  assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
  // 3. take snapshot --> close stream
  hdfs.createSnapshot(dir, "s1");
  out.close();
  // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3
  // should have been stored in s1's deleted list
  fileNode = (INodeFile) fsdir.getINode(file.toString());
  dirNode = fsdir.getINode(dir.toString()).asDirectory();
  last = dirNode.getDiffs().getLast();
  assertTrue(fileNode.isWithSnapshot());
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
  // 4. modify file --> append without closing stream --> take snapshot -->
  // close stream
  hdfs.setReplication(file, (short) (REPLICATION - 1));
  out = appendFileWithoutClosing(file, BLOCKSIZE);
  hdfs.createSnapshot(dir, "s2");
  out.close();
  // re-check the size of nodeInDeleted_S1
  assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
}
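For reference, the lengths asserted above can also be observed from an ordinary client through HDFS's reserved .snapshot path: the snapshot copy reports the length recorded when the snapshot was taken, while the live path reflects later appends. A small sketch, assuming a snapshottable directory /dir with a snapshot named s0 of /dir/file (the paths, the snapshot name, and the fs handle are illustrative, not part of the tests):

// Illustrative only; '/dir', 's0', and 'fs' are assumptions for this sketch.
Path livePath = new Path("/dir/file");
Path snapshotPath = new Path("/dir/.snapshot/s0/file");
long liveLen = fs.getFileStatus(livePath).getLen();         // grows as the file is appended
long snapshotLen = fs.getFileStatus(snapshotPath).getLen();  // fixed at snapshot time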