
Example 91 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From the class TestSnapshotBlocksMap, method testDeletionWithZeroSizeBlock2.

/**
   * Make sure we delete the 0-sized block when deleting an under-construction file.
   */
@Test
public void testDeletionWithZeroSizeBlock2() throws Exception {
    final Path foo = new Path("/foo");
    final Path subDir = new Path(foo, "sub");
    final Path bar = new Path(subDir, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
    // Re-open for append so the file stays under construction.
    hdfs.append(bar);
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    // Allocate a second, still 0-sized block for the under-construction file.
    cluster.getNameNodeRpc().addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, null, barNode.getId(), null, null);
    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
    barNode = fsdir.getINode4Write(bar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());
    hdfs.delete(subDir, true);
    // After deleting the live file, the snapshot copy should keep only the
    // full first block; the trailing 0-sized block must be gone.
    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
    barNode = fsdir.getINode(sbar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)
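
The same three-step pattern recurs in Examples 92 and 93 below: re-open the file for append so it stays under construction, wrap its last NameNode-internal block (a BlockInfo) together with the block pool ID in an ExtendedBlock, and pass that as previous to ClientProtocol#addBlock so a new, 0-sized block is allocated. ExtendedBlock itself is a plain data holder, so the wrapping step can be shown standalone; the sketch below uses a made-up block ID, generation stamp, and pool name (none of these values come from the test):

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class ExtendedBlockSketch {
    public static void main(String[] args) {
        // An ExtendedBlock is a Block qualified by its block pool ID, so the
        // same block ID stays unambiguous across federated namespaces.
        // All values below are illustrative.
        Block local = new Block(1073741825L, /* numBytes */ 1024L, /* genStamp */ 1001L);
        ExtendedBlock previous = new ExtendedBlock("BP-1234-127.0.0.1-1400000000000", local);
        System.out.println(previous.getBlockPoolId());
        // Prints: 1073741825 1024 1001
        System.out.println(previous.getBlockId() + " " + previous.getNumBytes() + " " + previous.getGenerationStamp());
    }
}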

Example 92 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From the class TestSnapshotBlocksMap, method testDeletionWithZeroSizeBlock3.

/**
   * 1. Rename an under-construction file with a 0-sized block after taking a snapshot.
   * 2. Delete the renamed directory.
   * Make sure we delete the 0-sized block; see HDFS-5476.
   */
@Test
public void testDeletionWithZeroSizeBlock3() throws Exception {
    final Path foo = new Path("/foo");
    final Path subDir = new Path(foo, "sub");
    final Path bar = new Path(subDir, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
    hdfs.append(bar);
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc().addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, null, barNode.getId(), null, null);
    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
    // rename bar
    final Path bar2 = new Path(subDir, "bar2");
    hdfs.rename(bar, bar2);
    INodeFile bar2Node = fsdir.getINode4Write(bar2.toString()).asFile();
    blks = bar2Node.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());
    // delete subDir
    hdfs.delete(subDir, true);
    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", "sub/bar");
    barNode = fsdir.getINode(sbar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)

Example 93 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From the class TestSnapshotBlocksMap, method testDeletionWithZeroSizeBlock.

/**
   * Make sure we delete the 0-sized block when deleting an INodeFileUCWithSnapshot.
   */
@Test
public void testDeletionWithZeroSizeBlock() throws Exception {
    final Path foo = new Path("/foo");
    final Path bar = new Path(foo, "bar");
    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPLICATION, 0L);
    SnapshotTestHelper.createSnapshot(hdfs, foo, "s0");
    hdfs.append(bar);
    INodeFile barNode = fsdir.getINode4Write(bar.toString()).asFile();
    BlockInfo[] blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
    cluster.getNameNodeRpc().addBlock(bar.toString(), hdfs.getClient().getClientName(), previous, null, barNode.getId(), null, null);
    SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
    barNode = fsdir.getINode4Write(bar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(2, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
    assertEquals(0, blks[1].getNumBytes());
    hdfs.delete(bar, true);
    final Path sbar = SnapshotTestHelper.getSnapshotPath(foo, "s1", bar.getName());
    barNode = fsdir.getINode(sbar.toString()).asFile();
    blks = barNode.getBlocks();
    assertEquals(1, blks.length);
    assertEquals(BLOCKSIZE, blks[0].getNumBytes());
}
Also used: Path (org.apache.hadoop.fs.Path), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), Test (org.junit.Test)

Example 94 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From the class BlockManager, method setBlockToken.

/** Generate a block token for the located block. */
public void setBlockToken(final LocatedBlock b, final AccessMode mode) throws IOException {
    if (isBlockTokenEnabled()) {
        // Use cached UGI if serving RPC calls.
        if (b.isStriped()) {
            Preconditions.checkState(b instanceof LocatedStripedBlock);
            LocatedStripedBlock sb = (LocatedStripedBlock) b;
            byte[] indices = sb.getBlockIndices();
            Token<BlockTokenIdentifier>[] blockTokens = new Token[indices.length];
            ExtendedBlock internalBlock = new ExtendedBlock(b.getBlock());
            for (int i = 0; i < indices.length; i++) {
                internalBlock.setBlockId(b.getBlock().getBlockId() + indices[i]);
                blockTokens[i] = blockTokenSecretManager.generateToken(NameNode.getRemoteUser().getShortUserName(), internalBlock, EnumSet.of(mode));
            }
            sb.setBlockTokens(blockTokens);
        } else {
            b.setBlockToken(blockTokenSecretManager.generateToken(NameNode.getRemoteUser().getShortUserName(), b.getBlock(), EnumSet.of(mode)));
        }
    }
}
Also used: LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Token (org.apache.hadoop.security.token.Token)
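
The striped branch above works because the internal blocks of an erasure-coded block group do not get independent IDs: each internal block's ID is the group ID plus its index in the stripe, which is why the loop reuses a single ExtendedBlock and only calls setBlockId per index. A minimal standalone sketch of that ID arithmetic; the pool name, group ID, and index array are illustrative, not taken from the source:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class StripedBlockIdSketch {
    public static void main(String[] args) {
        // Block group IDs for striped blocks come from the negative ID space;
        // this particular value is made up for illustration.
        long blockGroupId = -9223372036854775792L;
        byte[] indices = { 0, 1, 2, 3, 4 };
        ExtendedBlock internal = new ExtendedBlock("BP-example", new Block(blockGroupId, 0L, 1001L));
        for (byte i : indices) {
            // Internal block ID = group ID + index, as in the loop above.
            internal.setBlockId(blockGroupId + i);
            System.out.println("index " + i + " -> block id " + internal.getBlockId());
        }
    }
}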

Example 95 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.

From the class TestDataTransferProtocol, method testOpWrite.

@Test
public void testOpWrite() throws IOException {
    int numDataNodes = 1;
    final long BLOCK_ID_FUDGE = 128;
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
    try {
        cluster.waitActive();
        String poolId = cluster.getNamesystem().getBlockPoolId();
        datanode = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
        dnAddr = NetUtils.createSocketAddr(datanode.getXferAddr());
        FileSystem fileSys = cluster.getFileSystem();
        /* Test writing to finalized replicas */
        Path file = new Path("dataprotocol.dat");
        DFSTestUtil.createFile(fileSys, file, 1L, (short) numDataNodes, 0L);
        // get the first blockid for the file
        ExtendedBlock firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
        // test PIPELINE_SETUP_CREATE on a finalized block
        testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, "Cannot create an existing block", true);
        // test PIPELINE_DATA_STREAMING on a finalized block
        testWrite(firstBlock, BlockConstructionStage.DATA_STREAMING, 0L, "Unexpected stage", true);
        // test PIPELINE_SETUP_STREAMING_RECOVERY on an existing block
        long newGS = firstBlock.getGenerationStamp() + 1;
        testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY, newGS, "Cannot recover data streaming to a finalized replica", true);
        // test PIPELINE_SETUP_APPEND on an existing block
        newGS = firstBlock.getGenerationStamp() + 1;
        testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND, newGS, "Append to a finalized replica", false);
        firstBlock.setGenerationStamp(newGS);
        // test PIPELINE_SETUP_APPEND_RECOVERY on an existing block
        file = new Path("dataprotocol1.dat");
        DFSTestUtil.createFile(fileSys, file, 1L, (short) numDataNodes, 0L);
        firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
        newGS = firstBlock.getGenerationStamp() + 1;
        testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND_RECOVERY, newGS, "Recover appending to a finalized replica", false);
        // test PIPELINE_CLOSE_RECOVERY on an existing block
        file = new Path("dataprotocol2.dat");
        DFSTestUtil.createFile(fileSys, file, 1L, (short) numDataNodes, 0L);
        firstBlock = DFSTestUtil.getFirstBlock(fileSys, file);
        newGS = firstBlock.getGenerationStamp() + 1;
        testWrite(firstBlock, BlockConstructionStage.PIPELINE_CLOSE_RECOVERY, newGS, "Recover failed close to a finalized replica", false);
        firstBlock.setGenerationStamp(newGS);
        // Test writing to a new block. Don't choose the next sequential
        // block ID to avoid conflicting with IDs chosen by the NN.
        long newBlockId = firstBlock.getBlockId() + BLOCK_ID_FUDGE;
        ExtendedBlock newBlock = new ExtendedBlock(firstBlock.getBlockPoolId(), newBlockId, 0, firstBlock.getGenerationStamp());
        // test PIPELINE_SETUP_CREATE on a new block
        testWrite(newBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, "Create a new block", false);
        // test PIPELINE_SETUP_STREAMING_RECOVERY on a new block
        newGS = newBlock.getGenerationStamp() + 1;
        newBlock.setBlockId(newBlock.getBlockId() + 1);
        testWrite(newBlock, BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY, newGS, "Recover a new block", true);
        // test PIPELINE_SETUP_APPEND on a new block
        newGS = newBlock.getGenerationStamp() + 1;
        testWrite(newBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND, newGS, "Cannot append to a new block", true);
        // test PIPELINE_SETUP_APPEND_RECOVERY on a new block
        newBlock.setBlockId(newBlock.getBlockId() + 1);
        newGS = newBlock.getGenerationStamp() + 1;
        testWrite(newBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND_RECOVERY, newGS, "Cannot append to a new block", true);
        /* Test writing to RBW replicas */
        Path file1 = new Path("dataprotocol1.dat");
        DFSTestUtil.createFile(fileSys, file1, 1L, (short) numDataNodes, 0L);
        DFSOutputStream out = (DFSOutputStream) (fileSys.append(file1).getWrappedStream());
        out.write(1);
        out.hflush();
        FSDataInputStream in = fileSys.open(file1);
        firstBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
        firstBlock.setNumBytes(2L);
        try {
            // test PIPELINE_SETUP_CREATE on a RBW block
            testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_CREATE, 0L, "Cannot create a RBW block", true);
            // test PIPELINE_SETUP_APPEND on an existing block
            newGS = firstBlock.getGenerationStamp() + 1;
            testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND, newGS, "Cannot append to a RBW replica", true);
            // test PIPELINE_SETUP_APPEND_RECOVERY on an RBW replica
            testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_APPEND_RECOVERY, newGS, "Recover append to a RBW replica", false);
            firstBlock.setGenerationStamp(newGS);
            // test PIPELINE_SETUP_STREAMING_RECOVERY on a RBW block
            file = new Path("dataprotocol2.dat");
            DFSTestUtil.createFile(fileSys, file, 1L, (short) numDataNodes, 0L);
            out = (DFSOutputStream) (fileSys.append(file).getWrappedStream());
            out.write(1);
            out.hflush();
            in = fileSys.open(file);
            firstBlock = DFSTestUtil.getAllBlocks(in).get(0).getBlock();
            firstBlock.setNumBytes(2L);
            newGS = firstBlock.getGenerationStamp() + 1;
            testWrite(firstBlock, BlockConstructionStage.PIPELINE_SETUP_STREAMING_RECOVERY, newGS, "Recover a RBW replica", false);
        } finally {
            IOUtils.closeStream(in);
            IOUtils.closeStream(out);
        }
    } finally {
        cluster.shutdown();
    }
}
Also used: Path (org.apache.hadoop.fs.Path), Configuration (org.apache.hadoop.conf.Configuration), Builder (org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder), FileSystem (org.apache.hadoop.fs.FileSystem), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), FSDataInputStream (org.apache.hadoop.fs.FSDataInputStream), Test (org.junit.Test)
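
Two ExtendedBlock constructors carry this test: the (poolId, Block) wrapping shown in the earlier sketches, and the four-argument form new ExtendedBlock(poolId, blockId, numBytes, genStamp) used to fabricate a block the NameNode has never allocated. The BLOCK_ID_FUDGE offset simply keeps the fabricated ID clear of the next IDs the NameNode would hand out. A standalone sketch with made-up pool name and values:

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;

public class NewBlockSketch {
    public static void main(String[] args) {
        long existingId = 1073741825L; // in the test this comes from a real file's first block
        long fudge = 128;              // offset to dodge IDs the NN allocates next
        ExtendedBlock newBlock = new ExtendedBlock("BP-example", existingId + fudge, 0L, 1000L);
        // Prints as <poolId>:blk_<id>_<genStamp>
        System.out.println(newBlock);
    }
}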

Aggregations

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17