
Example 6 with StorageReceivedDeletedBlocks

use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

From the class TestDiskspaceQuotaUpdate, method testQuotaIssuesWhileCommittingHelper.

private void testQuotaIssuesWhileCommittingHelper(DatanodeProtocolClientSideTranslatorPB nnSpy, final short initialReplication, final short finalReplication) throws Exception {
    final String logStmt = "BUG: Inconsistent storagespace for directory";
    final Path dir = new Path(getParent(GenericTestUtils.getMethodName()), String.format("%d-%d", initialReplication, finalReplication));
    final Path file = new Path(dir, "testfile");
    LogCapturer logs = LogCapturer.captureLogs(NameNode.LOG);
    Mockito.doAnswer(new Answer<Object>() {

        @Override
        public Object answer(InvocationOnMock invocation) throws Throwable {
            if (finalReplication != initialReplication) {
                getDFS().setReplication(file, finalReplication);
            }
            // Call getContentSummary before the DN can notify the NN
            // that the block has been received to check for discrepancy
            getDFS().getContentSummary(dir);
            invocation.callRealMethod();
            return null;
        }
    }).when(nnSpy).blockReceivedAndDeleted(Mockito.<DatanodeRegistration>anyObject(), Mockito.anyString(), Mockito.<StorageReceivedDeletedBlocks[]>anyObject());
    getDFS().mkdirs(dir);
    getDFS().setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1);
    DFSTestUtil.createFile(getDFS(), file, BLOCKSIZE / 2, initialReplication, 1L);
    // Also check for discrepancy after completing the file
    getDFS().getContentSummary(dir);
    assertFalse(logs.getOutput().contains(logStmt));
}
Also used: Path (org.apache.hadoop.fs.Path), InvocationOnMock (org.mockito.invocation.InvocationOnMock), LogCapturer (org.apache.hadoop.test.GenericTestUtils.LogCapturer), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks)
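The interception above is standard Mockito: spy the translator that carries DataNode-to-NameNode calls, and use doAnswer to run the consistency check before forwarding to the real blockReceivedAndDeleted. Below is a minimal, self-contained sketch of that spy/doAnswer/callRealMethod pattern, using a hypothetical ReportSink class in place of the HDFS translator:

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.spy;

public class SpyInterceptSketch {

    // Hypothetical stand-in for DatanodeProtocolClientSideTranslatorPB.
    public static class ReportSink {
        public void blockReceived(String storageId) {
            System.out.println("real handler ran for " + storageId);
        }
    }

    public static void main(String[] args) {
        ReportSink sink = spy(new ReportSink());
        doAnswer(invocation -> {
            // Work done before the real method runs, mirroring the
            // getContentSummary() call in the test above.
            System.out.println("pre-check before the real call");
            return invocation.callRealMethod();
        }).when(sink).blockReceived(anyString());
        sink.blockReceived("storage-1");
    }
}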

Example 7 with StorageReceivedDeletedBlocks

use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

From the class TestDeadDatanode, method testDeadDatanode.

/**
   * Test to ensure namenode rejects request from dead datanode
   * - Start a cluster
   * - Shutdown the datanode and wait for it to be marked dead at the namenode
   * - Send datanode requests to the Namenode and make sure they are rejected
   *   appropriately.
   */
@Test
public void testDeadDatanode() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.getNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
    StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
    // Ensure blockReceived call from dead datanode is not rejected with
    // IOException, since it's async, but the node remains unregistered.
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // IBRs are async, make sure the NN processes all of them.
    bm.flushBlockOps();
    assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
    try {
        dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
        fail("Expected IOException is not thrown");
    } catch (IOException ex) {
    // Expected
    }
    // Ensure heartbeat from dead datanode is rejected with a command
    // that asks datanode to register again
    StorageReport[] rep = { new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT).getCommands();
    assertEquals(1, cmd.length);
    assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER.getAction());
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), IOException (java.io.IOException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block (org.apache.hadoop.hdfs.protocol.Block), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Test (org.junit.Test)
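For reference, the payload handed to blockReceivedAndDeleted above is just an array of per-storage entries. Below is a minimal sketch of a helper that builds a single-storage, single-block "received" report of the same shape; the helper name is hypothetical, but the constructors are the ones the test itself uses:

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo.BlockStatus;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class IbrPayloadSketch {

    // Builds the same shape of incremental block report (IBR) that the test
    // sends from the dead datanode: one storage, one RECEIVED_BLOCK entry.
    public static StorageReceivedDeletedBlocks[] singleReceivedBlock(
            String storageUuid, long blockId) {
        ReceivedDeletedBlockInfo[] blocks = {
            new ReceivedDeletedBlockInfo(new Block(blockId),
                BlockStatus.RECEIVED_BLOCK, null)
        };
        return new StorageReceivedDeletedBlocks[] {
            new StorageReceivedDeletedBlocks(new DatanodeStorage(storageUuid), blocks)
        };
    }
}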

Example 8 with StorageReceivedDeletedBlocks

use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

From the class TestPendingReconstruction, method testBlockReceived.

/**
   * Test if DatanodeProtocol#blockReceivedAndDeleted can correctly update the
   * pending reconstruction. Also make sure the blockReceivedAndDeleted call is
   * idempotent to the pending reconstruction.
   */
@Test
public void testBlockReceived() throws Exception {
    final Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
    MiniDFSCluster cluster = null;
    try {
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_COUNT).build();
        cluster.waitActive();
        DistributedFileSystem hdfs = cluster.getFileSystem();
        FSNamesystem fsn = cluster.getNamesystem();
        BlockManager blkManager = fsn.getBlockManager();
        final String file = "/tmp.txt";
        final Path filePath = new Path(file);
        short replFactor = 1;
        DFSTestUtil.createFile(hdfs, filePath, 1024L, replFactor, 0);
        // temporarily stop the heartbeat
        ArrayList<DataNode> datanodes = cluster.getDataNodes();
        for (int i = 0; i < DATANODE_COUNT; i++) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), true);
        }
        hdfs.setReplication(filePath, (short) DATANODE_COUNT);
        BlockManagerTestUtil.computeAllPendingWork(blkManager);
        assertEquals(1, blkManager.pendingReconstruction.size());
        INodeFile fileNode = fsn.getFSDirectory().getINode4Write(file).asFile();
        BlockInfo[] blocks = fileNode.getBlocks();
        assertEquals(DATANODE_COUNT - 1, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        LocatedBlock locatedBlock = hdfs.getClient().getLocatedBlocks(file, 0).get(0);
        DatanodeInfo existingDn = (locatedBlock.getLocations())[0];
        int reportDnNum = 0;
        String poolId = cluster.getNamesystem().getBlockPoolId();
        // report to NN
        for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
            if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
                DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        // IBRs are async, make sure the NN processes all of them.
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertEquals(DATANODE_COUNT - 3, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        // let the same datanodes report again
        for (int i = 0; i < DATANODE_COUNT && reportDnNum < 2; i++) {
            if (!datanodes.get(i).getDatanodeId().equals(existingDn)) {
                DatanodeRegistration dnR = datanodes.get(i).getDNRegistrationForBP(poolId);
                StorageReceivedDeletedBlocks[] report = { new StorageReceivedDeletedBlocks(new DatanodeStorage("Fake-storage-ID-Ignored"), new ReceivedDeletedBlockInfo[] { new ReceivedDeletedBlockInfo(blocks[0], BlockStatus.RECEIVED_BLOCK, "") }) };
                cluster.getNameNodeRpc().blockReceivedAndDeleted(dnR, poolId, report);
                reportDnNum++;
            }
        }
        cluster.getNamesystem().getBlockManager().flushBlockOps();
        assertEquals(DATANODE_COUNT - 3, blkManager.pendingReconstruction.getNumReplicas(blocks[0]));
        // re-enable heartbeat for the datanode that has data
        for (int i = 0; i < DATANODE_COUNT; i++) {
            DataNodeTestUtils.setHeartbeatsDisabledForTests(datanodes.get(i), false);
            DataNodeTestUtils.triggerHeartbeat(datanodes.get(i));
        }
        Thread.sleep(5000);
        assertEquals(0, blkManager.pendingReconstruction.size());
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
Also used: Path (org.apache.hadoop.fs.Path), DatanodeInfo (org.apache.hadoop.hdfs.protocol.DatanodeInfo), MiniDFSCluster (org.apache.hadoop.hdfs.MiniDFSCluster), Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), DistributedFileSystem (org.apache.hadoop.hdfs.DistributedFileSystem), INodeFile (org.apache.hadoop.hdfs.server.namenode.INodeFile), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), FSNamesystem (org.apache.hadoop.hdfs.server.namenode.FSNamesystem), Test (org.junit.Test)
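One fragile spot in the test above is the fixed Thread.sleep(5000) before the final assertion. A hedged alternative, sketched below, is to poll with GenericTestUtils.waitFor until the pending reconstruction queue drains. Note that pendingReconstruction is package-private, so a helper like this would have to live in the test's own package (org.apache.hadoop.hdfs.server.blockmanagement), and depending on the Hadoop version waitFor takes a Guava or a java.util.function Supplier:

package org.apache.hadoop.hdfs.server.blockmanagement;

import com.google.common.base.Supplier;
import org.apache.hadoop.test.GenericTestUtils;

public class PendingDrainWait {

    // Polls every 500 ms, for up to 30 s, until the pending reconstruction
    // queue is empty, instead of sleeping for a fixed 5 s.
    static void waitForPendingToDrain(final BlockManager bm) throws Exception {
        GenericTestUtils.waitFor(new Supplier<Boolean>() {
            @Override
            public Boolean get() {
                return bm.pendingReconstruction.size() == 0;
            }
        }, 500, 30000);
    }
}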

Example 9 with StorageReceivedDeletedBlocks

use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

From the class TestAddStripedBlocks, method testAddUCReplica.

/**
   * Test BlockInfoStripedUnderConstruction#addReplicaIfNotPresent in different
   * scenarios.
   */
@Test
public void testAddUCReplica() throws Exception {
    final Path file = new Path("/file1");
    final List<String> storageIDs = new ArrayList<>();
    // create an empty file
    FSDataOutputStream out = null;
    try {
        out = dfs.create(file, (short) 1);
        // 1. create the UC striped block
        FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
        INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
        cluster.getNamesystem().getAdditionalBlock(file.toString(), fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
        BlockInfo lastBlock = fileNode.getLastBlock();
        DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        // 2. mimic incremental block reports and make sure the uc-replica list in
        // the BlockInfoUCStriped is correct
        int i = 0;
        for (DataNode dn : cluster.getDataNodes()) {
            final Block block = new Block(lastBlock.getBlockId() + i++, 0, lastBlock.getGenerationStamp());
            DatanodeStorage storage = new DatanodeStorage(UUID.randomUUID().toString());
            storageIDs.add(storage.getStorageID());
            StorageReceivedDeletedBlocks[] reports = DFSTestUtil.makeReportForReceivedBlock(block, BlockStatus.RECEIVING_BLOCK, storage);
            for (StorageReceivedDeletedBlocks report : reports) {
                cluster.getNamesystem().processIncrementalBlockReport(dn.getDatanodeId(), report);
            }
        }
        // make sure lastBlock is correct and the storages have been updated
        locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
        indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
        Assert.assertEquals(groupSize, locs.length);
        Assert.assertEquals(groupSize, indices.length);
        for (DatanodeStorageInfo newstorage : locs) {
            Assert.assertTrue(storageIDs.contains(newstorage.getStorageID()));
        }
    } finally {
        IOUtils.cleanup(null, out);
    }
    // 3. restart the namenode. mimic the full block reports and check the
    // uc-replica list again
    cluster.restartNameNode(true);
    final String bpId = cluster.getNamesystem().getBlockPoolId();
    INodeFile fileNode = cluster.getNamesystem().getFSDirectory().getINode4Write(file.toString()).asFile();
    BlockInfo lastBlock = fileNode.getLastBlock();
    int i = groupSize - 1;
    for (DataNode dn : cluster.getDataNodes()) {
        String storageID = storageIDs.get(i);
        final Block block = new Block(lastBlock.getBlockId() + i--, lastBlock.getGenerationStamp(), 0);
        DatanodeStorage storage = new DatanodeStorage(storageID);
        List<ReplicaBeingWritten> blocks = new ArrayList<>();
        ReplicaBeingWritten replica = new ReplicaBeingWritten(block, null, null, null);
        blocks.add(replica);
        BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
        StorageBlockReport[] reports = { new StorageBlockReport(storage, bll) };
        cluster.getNameNodeRpc().blockReport(dn.getDNRegistrationForBP(bpId), bpId, reports, new BlockReportContext(1, 0, System.nanoTime(), 0, true));
    }
    DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature().getExpectedStorageLocations();
    byte[] indices = lastBlock.getUnderConstructionFeature().getBlockIndices();
    Assert.assertEquals(groupSize, locs.length);
    Assert.assertEquals(groupSize, indices.length);
    for (i = 0; i < groupSize; i++) {
        Assert.assertEquals(storageIDs.get(i), locs[groupSize - 1 - i].getStorageID());
        Assert.assertEquals(groupSize - i - 1, indices[i]);
    }
}
Also used: Path (org.apache.hadoop.fs.Path), ReplicaBeingWritten (org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten), ArrayList (java.util.ArrayList), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), DatanodeStorageInfo (org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo), BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs), LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock), Block (org.apache.hadoop.hdfs.protocol.Block), LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), FSDataOutputStream (org.apache.hadoop.fs.FSDataOutputStream), Test (org.junit.Test)
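Step 3 above rebuilds the uc-replica list from full block reports after a NameNode restart. Below is a minimal sketch of just the per-storage report construction used in that loop; the helper name is hypothetical, while the types and constructors are the ones the test already uses:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.server.datanode.ReplicaBeingWritten;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;

public class FullReportSketch {

    // Wraps a single under-construction replica into the per-storage report
    // that DatanodeProtocol#blockReport expects for one storage.
    public static StorageBlockReport[] singleReplicaReport(String storageUuid, Block block) {
        List<ReplicaBeingWritten> replicas = new ArrayList<>();
        replicas.add(new ReplicaBeingWritten(block, null, null, null));
        BlockListAsLongs encoded = BlockListAsLongs.encode(replicas);
        return new StorageBlockReport[] {
            new StorageBlockReport(new DatanodeStorage(storageUuid), encoded)
        };
    }
}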

Example 10 with StorageReceivedDeletedBlocks

use of org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks in project hadoop by apache.

From the class DatanodeProtocolClientSideTranslatorPB, method blockReceivedAndDeleted.

@Override
public void blockReceivedAndDeleted(DatanodeRegistration registration, String poolId, StorageReceivedDeletedBlocks[] receivedAndDeletedBlocks) throws IOException {
    BlockReceivedAndDeletedRequestProto.Builder builder = BlockReceivedAndDeletedRequestProto.newBuilder().setRegistration(PBHelper.convert(registration)).setBlockPoolId(poolId);
    for (StorageReceivedDeletedBlocks storageBlock : receivedAndDeletedBlocks) {
        StorageReceivedDeletedBlocksProto.Builder repBuilder = StorageReceivedDeletedBlocksProto.newBuilder();
        // Set the deprecated storageUuid field for wire compatibility with older NameNodes.
        repBuilder.setStorageUuid(storageBlock.getStorage().getStorageID());
        repBuilder.setStorage(PBHelperClient.convert(storageBlock.getStorage()));
        for (ReceivedDeletedBlockInfo rdBlock : storageBlock.getBlocks()) {
            repBuilder.addBlocks(PBHelper.convert(rdBlock));
        }
        builder.addBlocks(repBuilder.build());
    }
    try {
        rpcProxy.blockReceivedAndDeleted(NULL_CONTROLLER, builder.build());
    } catch (ServiceException se) {
        throw ProtobufHelper.getRemoteException(se);
    }
}
Also used: BlockReceivedAndDeletedRequestProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockReceivedAndDeletedRequestProto), ServiceException (com.google.protobuf.ServiceException), StorageReceivedDeletedBlocksProto (org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo)
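This translator is the DataNode-side half of the RPC: the same StorageReceivedDeletedBlocks[] built in the test examples above is passed to whatever DatanodeProtocol implementation is at hand, which in production is this PB translator and in the tests is cluster.getNameNodeRpc(). A tiny sketch of that call site, with a hypothetical helper name:

import java.io.IOException;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;

public class IbrSendSketch {

    // Forwards a prebuilt incremental report to the NameNode through the
    // DatanodeProtocol interface implemented by the translator above.
    public static void send(DatanodeProtocol nn, DatanodeRegistration reg,
            String poolId, StorageReceivedDeletedBlocks[] reports) throws IOException {
        nn.blockReceivedAndDeleted(reg, poolId, reports);
    }
}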

Aggregations

StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 13 uses
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 10 uses
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 8 uses
Path (org.apache.hadoop.fs.Path): 6 uses
Test (org.junit.Test): 6 uses
Block (org.apache.hadoop.hdfs.protocol.Block): 5 uses
LocatedBlock (org.apache.hadoop.hdfs.protocol.LocatedBlock): 5 uses
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 5 uses
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 4 uses
IOException (java.io.IOException): 3 uses
ArrayList (java.util.ArrayList): 3 uses
Configuration (org.apache.hadoop.conf.Configuration): 3 uses
BlockInfo (org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo): 3 uses
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 3 uses
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 3 uses
ServiceException (com.google.protobuf.ServiceException): 2 uses
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 2 uses
BlockListAsLongs (org.apache.hadoop.hdfs.protocol.BlockListAsLongs): 2 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 uses
LocatedStripedBlock (org.apache.hadoop.hdfs.protocol.LocatedStripedBlock): 2 uses