
Example 1 with DatanodeProtocol

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache, in class TestBlockRecovery, method testNoReplicaUnderRecovery:

/**
   * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testNoReplicaUnderRecovery() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    dn.data.createRbw(StorageType.DEFAULT, block, false);
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous = recoveryWorker.new RecoveryTaskContiguous(rBlock);
    try {
        RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
        fail("Sync should fail");
    } catch (IOException e) {
        // Verify the recovery failed for the expected reason.
        GenericTestUtils.assertExceptionContains("Cannot recover ", e);
    }
    DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(), anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
Also used: ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), IOException (java.io.IOException), InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), Test (org.junit.Test)
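
For context, the NameNode-side call that the test verifies is never made has roughly the following declaration on DatanodeProtocol. This is a sketch based on the Hadoop sources of this era; the exact parameter list can differ between Hadoop versions.

// Sketch of the DatanodeProtocol method verified above; check your
// Hadoop version for the authoritative signature.
void commitBlockSynchronization(ExtendedBlock block,
    long newgenerationstamp, long newlength,
    boolean closeFile, boolean deleteblock,
    DatanodeID[] newtargets, String[] newtargetstorages) throws IOException;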

Example 2 with DatanodeProtocol

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache, in class TestDeadDatanode, method testDeadDatanode:

/**
   * Test to ensure namenode rejects request from dead datanode
   * - Start a cluster
   * - Shutdown the datanode and wait for it to be marked dead at the namenode
   * - Send datanode requests to the Namenode and make sure they are rejected
   *   appropriately.
   */
@Test
public void testDeadDatanode() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
    cluster = new MiniDFSCluster.Builder(conf).build();
    cluster.waitActive();
    String poolId = cluster.getNamesystem().getBlockPoolId();
    // wait for datanode to be marked live
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(cluster.getDataNodes().get(0), poolId);
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
    // Shutdown and wait for datanode to be marked dead
    dn.shutdown();
    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
    DatanodeProtocol dnp = cluster.getNameNodeRpc();
    ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
    StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
    // Ensure blockReceived call from dead datanode is not rejected with
    // IOException, since it's async, but the node remains unregistered.
    dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
    BlockManager bm = cluster.getNamesystem().getBlockManager();
    // IBRs are async, make sure the NN processes all of them.
    bm.flushBlockOps();
    assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
    // Ensure blockReport from dead datanode is rejected with IOException
    StorageBlockReport[] report = { new StorageBlockReport(new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
    try {
        dnp.blockReport(reg, poolId, report, new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
        fail("Expected IOException is not thrown");
    } catch (IOException ex) {
    // Expected
    }
    // Ensure heartbeat from dead datanode is rejected with a command
    // that asks datanode to register again
    StorageReport[] rep = { new StorageReport(new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
    DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true, SlowPeerReports.EMPTY_REPORT).getCommands();
    assertEquals(1, cmd.length);
    assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
Also used: Configuration (org.apache.hadoop.conf.Configuration), HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration), StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport), StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport), IOException (java.io.IOException), DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration), DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand), DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode), BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager), BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext), DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage), Block (org.apache.hadoop.hdfs.protocol.Block), StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo), Test (org.junit.Test)
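
The long positional argument list in the sendHeartbeat call corresponds to a declaration of roughly this shape. The parameter names below are taken from the Hadoop sources of this era and should be treated as assumptions for other versions, which appended further report parameters.

// Sketch of DatanodeProtocol#sendHeartbeat as invoked above with
// dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true,
// SlowPeerReports.EMPTY_REPORT).
HeartbeatResponse sendHeartbeat(DatanodeRegistration registration,
    StorageReport[] reports,
    long dnCacheCapacity, long dnCacheUsed,
    int xmitsInProgress, int xceiverCount, int failedVolumes,
    VolumeFailureSummary volumeFailureSummary,
    boolean requestFullBlockReportLease,
    SlowPeerReports slowPeers) throws IOException;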

Example 3 with DatanodeProtocol

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache, in class TestBlockRecovery, method testZeroLenReplicas:

/**
   * BlockRecoveryFI_07. Max replica length from all DNs is zero.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testZeroLenReplicas() throws IOException, InterruptedException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0, block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
    for (RecoveringBlock rBlock : initRecoveringBlocks()) {
        BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous = recoveryWorker.new RecoveryTaskContiguous(rBlock);
        BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(RecoveryTaskContiguous);
        spyTask.recover();
    }
    DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
    verify(dnP).commitBlockSynchronization(block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
Also used: ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo), RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), Test (org.junit.Test)
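
Note the doReturn(...).when(spyDN) stubbing form. With a Mockito spy, when(spy.method(...)).thenReturn(...) would invoke the real initReplicaRecovery while setting up the stub; doReturn avoids that. A minimal, self-contained illustration (not from the Hadoop sources):

// Requires: import java.util.*; import static org.mockito.Mockito.*;
List<String> spyList = spy(new ArrayList<String>());
// when(spyList.get(0)).thenReturn("x");  // would throw: the real
//                                        // get(0) runs on an empty list
doReturn("x").when(spyList).get(0);       // safe: real get(0) not called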

Example 4 with DatanodeProtocol

Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache, in class TestBlockRecovery, method testNotMatchedReplicaID:

/**
   * BlockRecoveryFI_11. A replica's recovery id does not match the new GS.
   *
   * @throws IOException in case of an error
   */
@Test(timeout = 60000)
public void testNotMatchedReplicaID() throws IOException {
    if (LOG.isDebugEnabled()) {
        LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaInPipeline replicaInfo = dn.data.createRbw(StorageType.DEFAULT, block, false).getReplica();
    ReplicaOutputStreams streams = null;
    try {
        streams = replicaInfo.createStreams(true, DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
        streams.getChecksumOut().write('a');
        dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID + 1));
        BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous = recoveryWorker.new RecoveryTaskContiguous(rBlock);
        try {
            RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
            fail("Sync should fail");
        } catch (IOException e) {
            // Verify the recovery failed for the expected reason.
            GenericTestUtils.assertExceptionContains("Cannot recover ", e);
        }
        DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
        verify(namenode, never()).commitBlockSynchronization(any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(), anyBoolean(), any(DatanodeID[].class), any(String[].class));
    } finally {
        // Guard against createStreams having failed before assignment.
        if (streams != null) {
            streams.close();
        }
    }
}
Also used: RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock), ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock), ReplicaOutputStreams (org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams), IOException (java.io.IOException), InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol), DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol), Test (org.junit.Test)
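
The finally block guards against streams being null, since createStreams can fail before the assignment. Assuming ReplicaOutputStreams implements Closeable, as it does in Hadoop sources of this era, the same cleanup can be written more compactly with try-with-resources:

// Sketch: the setup and cleanup above, using try-with-resources.
try (ReplicaOutputStreams out = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512))) {
    out.getChecksumOut().write('a');
    // ... recovery attempt and verification as in the test body ...
}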

Aggregations

DatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol): 4 uses
Test (org.junit.Test): 4 uses
IOException (java.io.IOException): 3 uses
InterDatanodeProtocol (org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol): 3 uses
ExtendedBlock (org.apache.hadoop.hdfs.protocol.ExtendedBlock): 2 uses
RecoveringBlock (org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock): 2 uses
Configuration (org.apache.hadoop.conf.Configuration): 1 use
HdfsConfiguration (org.apache.hadoop.hdfs.HdfsConfiguration): 1 use
Block (org.apache.hadoop.hdfs.protocol.Block): 1 use
BlockManager (org.apache.hadoop.hdfs.server.blockmanagement.BlockManager): 1 use
DataNode (org.apache.hadoop.hdfs.server.datanode.DataNode): 1 use
ReplicaOutputStreams (org.apache.hadoop.hdfs.server.datanode.fsdataset.ReplicaOutputStreams): 1 use
BlockReportContext (org.apache.hadoop.hdfs.server.protocol.BlockReportContext): 1 use
DatanodeCommand (org.apache.hadoop.hdfs.server.protocol.DatanodeCommand): 1 use
DatanodeRegistration (org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration): 1 use
DatanodeStorage (org.apache.hadoop.hdfs.server.protocol.DatanodeStorage): 1 use
ReceivedDeletedBlockInfo (org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo): 1 use
ReplicaRecoveryInfo (org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo): 1 use
StorageBlockReport (org.apache.hadoop.hdfs.server.protocol.StorageBlockReport): 1 use
StorageReceivedDeletedBlocks (org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks): 1 use
StorageReport (org.apache.hadoop.hdfs.server.protocol.StorageReport): 1 use