
Example 16 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache, from the class TestSimulatedFSDataset, method testInjectionEmpty.

@Test
public void testInjectionEmpty() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    int bytesAdded = addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
    // Inject blocks into an empty fsdataset
    //  - injecting the blocks we got above.
    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    sfsdataset.injectBlocks(bpid, blockReport);
    blockReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
        assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
    }
    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
    assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
}
Also used: BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
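
The injection tests pass an ExtendedBlock wherever a block must be tied to a specific block pool. As a quick orientation, here is a minimal sketch of the two constructors these tests use; the pool ID and sizes below are made-up values, not taken from the test:

// A minimal sketch, not from the test: both constructors describe the same
// block. The pool ID "BP-1-127.0.0.1-1234" and the sizes are invented.
String bpid = "BP-1-127.0.0.1-1234";
// Block(blockId, numBytes, generationStamp)
Block b = new Block(1, 1024, 100);
ExtendedBlock fromBlock = new ExtendedBlock(bpid, b);
ExtendedBlock fromFields = new ExtendedBlock(bpid, 1, 1024, 100);
// ExtendedBlock adds the pool ID so the same block id can exist in
// different block pools (HDFS federation).
assertEquals(bpid, fromBlock.getBlockPoolId());
assertEquals(fromBlock.getBlockId(), fromFields.getBlockId());
assertEquals(1024, fromBlock.getNumBytes());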

Example 17 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache, from the class TestSimulatedFSDataset, method testInvalidate.

@Test
public void testInvalidate() throws IOException {
    final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    int bytesAdded = addSomeBlocks(fsdataset);
    Block[] deleteBlocks = new Block[2];
    deleteBlocks[0] = new Block(1, 0, 0);
    deleteBlocks[1] = new Block(2, 0, 0);
    fsdataset.invalidate(bpid, deleteBlocks);
    checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
    checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
    long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
    assertEquals(bytesAdded - sizeDeleted, fsdataset.getDfsUsed());
    assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted, fsdataset.getRemaining());
    // Now make sure the rest of the blocks are valid
    for (int i = 3; i <= NUMBLOCKS; ++i) {
        Block b = new Block(i, 0, 0);
        assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
    }
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), Test (org.junit.Test)
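
checkInvalidBlock is a private helper of TestSimulatedFSDataset that this excerpt does not show. A plausible sketch of what such a helper verifies (this body is an assumption for illustration, not the actual Hadoop helper):

// Hypothetical reconstruction: an invalidated block must no longer be
// reported valid, and length lookups against it must fail.
private void checkInvalidBlock(ExtendedBlock b) {
    final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    assertFalse(fsdataset.isValidBlock(b));
    try {
        fsdataset.getLength(b);
        fail("Expected an IOException for an invalid block");
    } catch (IOException e) {
        // ok - as expected
    }
}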

Example 18 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache, from the class TestSimulatedFSDataset, method testInjectionNonEmpty.

@Test
public void testInjectionNonEmpty() throws IOException {
    SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(0, blockReport.getNumberOfBlocks());
    int bytesAdded = addSomeBlocks(fsdataset);
    blockReport = fsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    }
    fsdataset = null;
    // Inject blocks into a non-empty fsdataset
    //  - injecting the blocks we got above.
    SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
    // Add some blocks whose block ids do not conflict with
    // the ones we are going to inject.
    bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS + 1, false);
    // The second dataset should now report its own NUMBLOCKS blocks.
    BlockListAsLongs newReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS, newReport.getNumberOfBlocks());
    sfsdataset.injectBlocks(bpid, blockReport);
    blockReport = sfsdataset.getBlockReport(bpid);
    assertEquals(NUMBLOCKS * 2, blockReport.getNumberOfBlocks());
    for (Block b : blockReport) {
        assertNotNull(b);
        assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
        assertEquals(blockIdToLen(b.getBlockId()), sfsdataset.getLength(new ExtendedBlock(bpid, b)));
    }
    assertEquals(bytesAdded, sfsdataset.getDfsUsed());
    assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
    // Now test that the blocks cannot be injected if the dataset does not
    // have sufficient capacity.
    conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
    try {
        sfsdataset = getSimulatedFSDataset();
        sfsdataset.addBlockPool(bpid, conf);
        sfsdataset.injectBlocks(bpid, blockReport);
        fail("Expected an IOException");
    } catch (IOException e) {
        // ok - as expected
    }
}
Also used: BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), IOException (java.io.IOException), Test (org.junit.Test)

Example 19 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache, from the class TestSimulatedFSDataset, method testGetMetaData.

@Test
public void testGetMetaData() throws IOException {
    final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
    ExtendedBlock b = new ExtendedBlock(bpid, FIRST_BLK_ID, 5, 0);
    try {
        // The block has not been added yet, so this must throw.
        fsdataset.getMetaDataInputStream(b);
        fail("Expected an IOException");
    } catch (IOException e) {
        // ok - as expected
    }
    // Only need to add one but ....
    addSomeBlocks(fsdataset);
    b = new ExtendedBlock(bpid, FIRST_BLK_ID, 0, 0);
    InputStream metaInput = fsdataset.getMetaDataInputStream(b);
    DataInputStream metaDataInput = new DataInputStream(metaInput);
    short version = metaDataInput.readShort();
    assertEquals(BlockMetadataHeader.VERSION, version);
    DataChecksum checksum = DataChecksum.newDataChecksum(metaDataInput);
    assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
    assertEquals(0, checksum.getChecksumSize());
}
Also used: DataInputStream (java.io.DataInputStream), InputStream (java.io.InputStream), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), DataChecksum (org.apache.hadoop.util.DataChecksum), Test (org.junit.Test)
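
The header parsed above is small: a 2-byte version followed by a serialized DataChecksum header. A standalone round-trip sketch of that layout (not part of the test; the 512-byte chunk size is an arbitrary choice, and the java.io stream classes are assumed to be imported):

// Write the same layout the test reads back: version short, then checksum header.
ByteArrayOutputStream baos = new ByteArrayOutputStream();
DataOutputStream out = new DataOutputStream(baos);
out.writeShort(BlockMetadataHeader.VERSION);
DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512).writeHeader(out);
// Read it back the way testGetMetaData does.
DataInputStream in = new DataInputStream(new ByteArrayInputStream(baos.toByteArray()));
assertEquals(BlockMetadataHeader.VERSION, in.readShort());
DataChecksum checksum = DataChecksum.newDataChecksum(in);
assertEquals(DataChecksum.Type.NULL, checksum.getChecksumType());
// A NULL checksum stores no checksum bytes per chunk.
assertEquals(0, checksum.getChecksumSize());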

Example 20 with ExtendedBlock

Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache, from the class TestPipelinesFailover, method testFailoverRightBeforeCommitSynchronization.

/**
   * Test the scenario where the NN fails over after issuing a block
   * synchronization request, but before it is committed. The
   * DN running the recovery should then fail to commit the synchronization
   * and a later retry will succeed.
   */
@Test(timeout = 30000)
public void testFailoverRightBeforeCommitSynchronization() throws Exception {
    final Configuration conf = new Configuration();
    // Disable permissions so that another user can recover the lease.
    conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
    conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
    FSDataOutputStream stm = null;
    final MiniDFSCluster cluster = newMiniCluster(conf, 3);
    try {
        cluster.waitActive();
        cluster.transitionToActive(0);
        Thread.sleep(500);
        LOG.info("Starting with NN 0 active");
        FileSystem fs = HATestUtil.configureFailoverFs(cluster, conf);
        stm = fs.create(TEST_PATH);
        // write a half block
        AppendTestUtil.write(stm, 0, BLOCK_SIZE / 2);
        stm.hflush();
        // Look into the block manager on the active node for the block
        // under construction.
        NameNode nn0 = cluster.getNameNode(0);
        ExtendedBlock blk = DFSTestUtil.getFirstBlock(fs, TEST_PATH);
        DatanodeDescriptor expectedPrimary = DFSTestUtil.getExpectedPrimaryNode(nn0, blk);
        LOG.info("Expecting block recovery to be triggered on DN " + expectedPrimary);
        // Find the corresponding DN daemon, and spy on its connection to the
        // active.
        DataNode primaryDN = cluster.getDataNode(expectedPrimary.getIpcPort());
        DatanodeProtocolClientSideTranslatorPB nnSpy = InternalDataNodeTestUtils.spyOnBposToNN(primaryDN, nn0);
        // Delay the commitBlockSynchronization call
        DelayAnswer delayer = new DelayAnswer(LOG);
        Mockito.doAnswer(delayer).when(nnSpy).commitBlockSynchronization(
            Mockito.eq(blk),
            Mockito.anyInt(), // new genstamp
            Mockito.anyLong(), // new length
            Mockito.eq(true), // close file
            Mockito.eq(false), // delete block
            (DatanodeID[]) Mockito.anyObject(), // new targets
            (String[]) Mockito.anyObject()); // new target storages
        DistributedFileSystem fsOtherUser = createFsAsOtherUser(cluster, conf);
        assertFalse(fsOtherUser.recoverLease(TEST_PATH));
        LOG.info("Waiting for commitBlockSynchronization call from primary");
        delayer.waitForCall();
        LOG.info("Failing over to NN 1");
        cluster.transitionToStandby(0);
        cluster.transitionToActive(1);
        // Let the commitBlockSynchronization call go through, and check that
        // it failed with the correct exception.
        delayer.proceed();
        delayer.waitForResult();
        Throwable t = delayer.getThrown();
        if (t == null) {
            fail("commitBlockSynchronization call did not fail on standby");
        }
        GenericTestUtils.assertExceptionContains("Operation category WRITE is not supported", t);
        // Now, if we try again to recover the block, it should succeed on the new
        // active.
        loopRecoverLease(fsOtherUser, TEST_PATH);
        AppendTestUtil.check(fs, TEST_PATH, BLOCK_SIZE / 2);
    } finally {
        IOUtils.closeStream(stm);
        cluster.shutdown();
    }
}
Also used: NameNode (org.apache.hadoop.hdfs.server.namenode.NameNode), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), DelayAnswer (org.apache.hadoop.test.GenericTestUtils.DelayAnswer), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), DatanodeDescriptor (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor), DatanodeProtocolClientSideTranslatorPB (org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB), DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), FileSystem (org.apache.hadoop.fs.FileSystem), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
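
The timing control in this test comes from GenericTestUtils.DelayAnswer: a Mockito Answer that parks the stubbed call until the test releases it, then records the call's outcome. A stripped-down sketch of the same choreography on a spied helper object (the Slow class and its rpc() method are made up for illustration; LOG is assumed to be an existing commons-logging logger, as in the test above):

static class Slow {
    public String rpc() {
        return "ok";
    }
}

@Test(timeout = 10000)
public void delayAnswerSketch() throws Exception {
    DelayAnswer delayer = new DelayAnswer(LOG);
    Slow spy = Mockito.spy(new Slow());
    // Park rpc() inside the answer until proceed() is called.
    Mockito.doAnswer(delayer).when(spy).rpc();
    Thread caller = new Thread(() -> spy.rpc());
    caller.start();
    // Block until rpc() is in flight and parked inside the DelayAnswer.
    delayer.waitForCall();
    // A real test changes cluster state here (e.g. fails over the NameNode).
    delayer.proceed();
    // Wait for the parked call to run to completion and record its outcome.
    delayer.waitForResult();
    caller.join();
    // The real method ran after proceed() and did not throw.
    assertNull(delayer.getThrown());
}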

Aggregations

Types co-occurring with ExtendedBlock in these examples (number of usages):

ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 208
Test (org.junit.Test): 124
Path (org.apache.hadoop.fs.Path): 91
Configuration (org.apache.hadoop.conf.Configuration): 71
MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster): 63
FileSystem (org.apache.hadoop.fs.FileSystem): 62
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 55
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 53
IOException (java.io.IOException): 41
DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo): 41
Block (org.apache.hadoop.hdfs.protocol.Block): 38
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 34
DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem): 32
File (java.io.File): 22
FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream): 20
LocatedBlocks (org.apache.hadoop.hdfs.protocol.LocatedBlocks): 20
DatanodeID (org.apache.hadoop.hdfs.protocol.DatanodeID): 18
FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem): 18
InetSocketAddress (java.net.InetSocketAddress): 17
ArrayList (java.util.ArrayList): 17