Example 21 with HdfsDataOutputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

From the class TestFileLimit, method testMaxBlocksPerFileLimit.

@Test(timeout = 60000)
public void testMaxBlocksPerFileLimit() throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Make a small block size and a low limit
    final long blockSize = 4096;
    final long numBlocks = 2;
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, numBlocks);
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
    FileSystem fs = cluster.getFileSystem();
    HdfsDataOutputStream fout = (HdfsDataOutputStream) fs.create(new Path("/testmaxfilelimit"));
    try {
        // Write maximum number of blocks
        fout.write(new byte[(int) blockSize * (int) numBlocks]);
        fout.hflush();
        // Try to write one more block
        try {
            fout.write(new byte[1]);
            fout.hflush();
            assert false : "Expected IOException after writing too many blocks";
        } catch (IOException e) {
            GenericTestUtils.assertExceptionContains("File has reached the limit" + " on maximum number of", e);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used : Path(org.apache.hadoop.fs.Path) MiniDFSCluster(org.apache.hadoop.hdfs.MiniDFSCluster) HdfsConfiguration(org.apache.hadoop.hdfs.HdfsConfiguration) Configuration(org.apache.hadoop.conf.Configuration) FileSystem(org.apache.hadoop.fs.FileSystem) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) IOException(java.io.IOException) Test(org.junit.Test)
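
The limit exercised here, DFSConfigKeys.DFS_NAMENODE_MAX_BLOCKS_PER_FILE_KEY, is enforced by the NameNode when a new block is allocated, which is why the failure surfaces on the extra write()/hflush() rather than on create(). Below is a minimal sketch, not taken from the Hadoop test suite, of the same cast-and-flush pattern in client code; the class name and path are illustrative assumptions.

import java.io.IOException;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class BlockLimitedWriter {
    /**
     * Writes data and flushes it to the datanode pipeline. If the write needs
     * more blocks than the NameNode allows per file, an IOException surfaces
     * here rather than at create().
     */
    public static void writeAndFlush(FileSystem fs, Path path, byte[] data)
            throws IOException {
        // Against HDFS (DistributedFileSystem), create() returns an
        // HdfsDataOutputStream, which is what makes the cast in the test valid.
        try (HdfsDataOutputStream out = (HdfsDataOutputStream) fs.create(path)) {
            out.write(data);
            // hflush() pushes buffered bytes to the datanodes; allocating a new
            // block at this point is where the per-file block limit can trip.
            out.hflush();
        }
    }
}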

Example 22 with HdfsDataOutputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

From the class TestINodeFileUnderConstructionWithSnapshot, method testGetBlockLocations.

/**
   * call DFSClient#callGetBlockLocations(...) for snapshot file. Make sure only
   * blocks within the size range are returned.
   */
@Test
public void testGetBlockLocations() throws Exception {
    final Path root = new Path("/");
    final Path file = new Path("/file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    // take a snapshot on root
    SnapshotTestHelper.createSnapshot(hdfs, root, "s1");
    final Path fileInSnapshot = SnapshotTestHelper.getSnapshotPath(root, "s1", file.getName());
    FileStatus status = hdfs.getFileStatus(fileInSnapshot);
    // make sure we record the size for the file
    assertEquals(BLOCKSIZE, status.getLen());
    // append data to file
    DFSTestUtil.appendFile(hdfs, file, BLOCKSIZE - 1);
    status = hdfs.getFileStatus(fileInSnapshot);
    // the size of snapshot file should still be BLOCKSIZE
    assertEquals(BLOCKSIZE, status.getLen());
    // the size of the file should be (2 * BLOCKSIZE - 1)
    status = hdfs.getFileStatus(file);
    assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
    // call DFSClient#callGetBlockLocations for the file in snapshot
    LocatedBlocks blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(), fileInSnapshot.toString(), 0, Long.MAX_VALUE);
    List<LocatedBlock> blockList = blocks.getLocatedBlocks();
    // should be only one block
    assertEquals(BLOCKSIZE, blocks.getFileLength());
    assertEquals(1, blockList.size());
    // check the last block
    LocatedBlock lastBlock = blocks.getLastLocatedBlock();
    assertEquals(0, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
    // take another snapshot
    SnapshotTestHelper.createSnapshot(hdfs, root, "s2");
    final Path fileInSnapshot2 = SnapshotTestHelper.getSnapshotPath(root, "s2", file.getName());
    // append data to file without closing
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    status = hdfs.getFileStatus(fileInSnapshot2);
    // the size of snapshot file should be BLOCKSIZE*2-1
    assertEquals(BLOCKSIZE * 2 - 1, status.getLen());
    // the size of the file should be (3 * BLOCKSIZE - 1)
    status = hdfs.getFileStatus(file);
    assertEquals(BLOCKSIZE * 3 - 1, status.getLen());
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(), fileInSnapshot2.toString(), 0, Long.MAX_VALUE);
    assertFalse(blocks.isUnderConstruction());
    assertTrue(blocks.isLastBlockComplete());
    blockList = blocks.getLocatedBlocks();
    // should be 2 blocks
    assertEquals(BLOCKSIZE * 2 - 1, blocks.getFileLength());
    assertEquals(2, blockList.size());
    // check the last block
    lastBlock = blocks.getLastLocatedBlock();
    assertEquals(BLOCKSIZE, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE, lastBlock.getBlockSize());
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(), fileInSnapshot2.toString(), BLOCKSIZE, 0);
    blockList = blocks.getLocatedBlocks();
    assertEquals(1, blockList.size());
    // check blocks for file being written
    blocks = DFSClientAdapter.callGetBlockLocations(cluster.getNameNodeRpc(), file.toString(), 0, Long.MAX_VALUE);
    blockList = blocks.getLocatedBlocks();
    assertEquals(3, blockList.size());
    assertTrue(blocks.isUnderConstruction());
    assertFalse(blocks.isLastBlockComplete());
    lastBlock = blocks.getLastLocatedBlock();
    assertEquals(BLOCKSIZE * 2, lastBlock.getStartOffset());
    assertEquals(BLOCKSIZE - 1, lastBlock.getBlockSize());
    out.close();
}
Also used : Path(org.apache.hadoop.fs.Path) FileStatus(org.apache.hadoop.fs.FileStatus) LocatedBlocks(org.apache.hadoop.hdfs.protocol.LocatedBlocks) LocatedBlock(org.apache.hadoop.hdfs.protocol.LocatedBlock) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) Test(org.junit.Test)
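
The call this test hinges on is out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)): without UPDATE_LENGTH, hsync() persists the appended bytes on the datanodes, but the NameNode may keep reporting the pre-append file length until the stream is closed. A minimal sketch of that pattern outside the test harness follows; the class and method names are illustrative assumptions, and it presumes an existing file reachable through a DistributedFileSystem.

import java.io.IOException;
import java.util.EnumSet;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag;

public class UpdateLengthAppend {
    static void appendAndPublishLength(DistributedFileSystem hdfs, Path path,
            byte[] data) throws IOException {
        // The stream returned by DistributedFileSystem#append is concretely an
        // HdfsDataOutputStream, so the cast exposes hsync(EnumSet<SyncFlag>).
        HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(path);
        try {
            out.write(data);
            // UPDATE_LENGTH asks the NameNode to record the new length now, so
            // getFileStatus() and getBlockLocations() reflect it before close().
            out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
        } finally {
            out.close();
        }
    }
}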

Example 23 with HdfsDataOutputStream

Use of org.apache.hadoop.hdfs.client.HdfsDataOutputStream in project hadoop by apache.

From the class TestINodeFileUnderConstructionWithSnapshot, method testSnapshotWhileAppending.

/**
   * Test snapshot during file appending, before the corresponding
   * {@link FSDataOutputStream} instance closes.
   */
@Test(timeout = 60000)
public void testSnapshotWhileAppending() throws Exception {
    Path file = new Path(dir, "file");
    DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, seed);
    // 1. append without closing stream --> create snapshot
    HdfsDataOutputStream out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    SnapshotTestHelper.createSnapshot(hdfs, dir, "s0");
    out.close();
    // check: an INodeFileUnderConstructionWithSnapshot should be stored into s0's
    // deleted list, with size BLOCKSIZE*2
    INodeFile fileNode = (INodeFile) fsdir.getINode(file.toString());
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize());
    INodeDirectory dirNode = fsdir.getINode(dir.toString()).asDirectory();
    DirectoryDiff last = dirNode.getDiffs().getLast();
    // 2. append without closing stream
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    out.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH));
    // re-check nodeInDeleted_S0
    dirNode = fsdir.getINode(dir.toString()).asDirectory();
    assertEquals(BLOCKSIZE * 2, fileNode.computeFileSize(last.getSnapshotId()));
    // 3. take snapshot --> close stream
    hdfs.createSnapshot(dir, "s1");
    out.close();
    // check: an INodeFileUnderConstructionWithSnapshot with size BLOCKSIZE*3 should
    // have been stored in s1's deleted list
    fileNode = (INodeFile) fsdir.getINode(file.toString());
    dirNode = fsdir.getINode(dir.toString()).asDirectory();
    last = dirNode.getDiffs().getLast();
    assertTrue(fileNode.isWithSnapshot());
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
    // 4. modify file --> append without closing stream --> take snapshot -->
    // close stream
    hdfs.setReplication(file, (short) (REPLICATION - 1));
    out = appendFileWithoutClosing(file, BLOCKSIZE);
    hdfs.createSnapshot(dir, "s2");
    out.close();
    // re-check the size of nodeInDeleted_S1
    assertEquals(BLOCKSIZE * 3, fileNode.computeFileSize(last.getSnapshotId()));
}
Also used : Path(org.apache.hadoop.fs.Path) INodeDirectory(org.apache.hadoop.hdfs.server.namenode.INodeDirectory) DirectoryDiff(org.apache.hadoop.hdfs.server.namenode.snapshot.DirectoryWithSnapshotFeature.DirectoryDiff) HdfsDataOutputStream(org.apache.hadoop.hdfs.client.HdfsDataOutputStream) INodeFile(org.apache.hadoop.hdfs.server.namenode.INodeFile) Test(org.junit.Test)
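
Both snapshot tests above rely on appendFileWithoutClosing(file, BLOCKSIZE), a private helper of TestINodeFileUnderConstructionWithSnapshot whose body is not shown on this page. A plausible sketch of what such a helper does, assuming the test's hdfs field (a DistributedFileSystem) plus java.util.Random and java.io.IOException imports; the exact implementation in the test class may differ.

private HdfsDataOutputStream appendFileWithoutClosing(Path file, int length)
        throws IOException {
    // Random payload of the requested length.
    byte[] toAppend = new byte[length];
    new Random().nextBytes(toAppend);
    // DistributedFileSystem#append concretely returns an HdfsDataOutputStream.
    HdfsDataOutputStream out = (HdfsDataOutputStream) hdfs.append(file);
    out.write(toAppend);
    // Deliberately left open: the caller decides when to close, which is what
    // lets the tests take snapshots while the file is still under construction.
    return out;
}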

Aggregations

HdfsDataOutputStream (org.apache.hadoop.hdfs.client.HdfsDataOutputStream): 23
Test (org.junit.Test): 17
Path (org.apache.hadoop.fs.Path): 10
Nfs3FileAttributes (org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes): 10
DFSClient (org.apache.hadoop.hdfs.DFSClient): 9
NfsConfiguration (org.apache.hadoop.hdfs.nfs.conf.NfsConfiguration): 8
ShellBasedIdMapping (org.apache.hadoop.security.ShellBasedIdMapping): 8
FileHandle (org.apache.hadoop.nfs.nfs3.FileHandle): 6
COMMIT_STATUS (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.COMMIT_STATUS): 5
CommitCtx (org.apache.hadoop.hdfs.nfs.nfs3.OpenFileCtx.CommitCtx): 5
IOException (java.io.IOException): 4
Channel (org.jboss.netty.channel.Channel): 4
Configuration (org.apache.hadoop.conf.Configuration): 3
FileSystem (org.apache.hadoop.fs.FileSystem): 3
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 3
Random (java.util.Random): 2
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 2
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 2
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 2
WccData (org.apache.hadoop.nfs.nfs3.response.WccData): 2