Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache.
The class TestBlockRecovery, method testNoReplicaUnderRecovery.
/**
 * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 60000)
public void testNoReplicaUnderRecovery() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  dn.data.createRbw(StorageType.DEFAULT, block, false);
  BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
      recoveryWorker.new RecoveryTaskContiguous(rBlock);
  try {
    RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
    fail("Sync should fail");
  } catch (IOException e) {
    assertTrue(e.getMessage().startsWith("Cannot recover "));
  }
  DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(namenode, never()).commitBlockSynchronization(any(ExtendedBlock.class),
      anyLong(), anyLong(), anyBoolean(), anyBoolean(),
      any(DatanodeID[].class), any(String[].class));
}
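The check at the end of this test relies on Mockito's never() verification against the active NameNode proxy. The following is a minimal, self-contained sketch of that idiom against a plain Mockito mock of DatanodeProtocol; the class name CommitSyncVerificationSketch and the helper assertNoCommit are illustrative and not part of the Hadoop test above.

import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class CommitSyncVerificationSketch {

  /** Fails if commitBlockSynchronization was invoked with any arguments. */
  static void assertNoCommit(DatanodeProtocol namenode) throws IOException {
    verify(namenode, never()).commitBlockSynchronization(
        any(ExtendedBlock.class), anyLong(), anyLong(),
        anyBoolean(), anyBoolean(),
        any(DatanodeID[].class), any(String[].class));
  }

  public static void main(String[] args) throws IOException {
    // A fresh mock records no interactions, so the check passes.
    assertNoCommit(mock(DatanodeProtocol.class));
  }
}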
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache.
The class TestDeadDatanode, method testDeadDatanode.
/**
 * Test to ensure the namenode rejects requests from a dead datanode:
 * - Start a cluster
 * - Shutdown the datanode and wait for it to be marked dead at the namenode
 * - Send datanode requests to the namenode and make sure they are rejected
 *   appropriately.
 */
@Test
public void testDeadDatanode() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
  conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
  cluster = new MiniDFSCluster.Builder(conf).build();
  cluster.waitActive();
  String poolId = cluster.getNamesystem().getBlockPoolId();
  // wait for datanode to be marked live
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration reg = InternalDataNodeTestUtils.getDNRegistrationForBP(
      cluster.getDataNodes().get(0), poolId);
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true, 20000);
  // Shutdown and wait for datanode to be marked dead
  dn.shutdown();
  DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false, 20000);
  DatanodeProtocol dnp = cluster.getNameNodeRpc();
  ReceivedDeletedBlockInfo[] blocks = { new ReceivedDeletedBlockInfo(
      new Block(0), ReceivedDeletedBlockInfo.BlockStatus.RECEIVED_BLOCK, null) };
  StorageReceivedDeletedBlocks[] storageBlocks = { new StorageReceivedDeletedBlocks(
      new DatanodeStorage(reg.getDatanodeUuid()), blocks) };
  // Ensure blockReceived call from dead datanode is not rejected with
  // IOException, since it's async, but the node remains unregistered.
  dnp.blockReceivedAndDeleted(reg, poolId, storageBlocks);
  BlockManager bm = cluster.getNamesystem().getBlockManager();
  // IBRs are async, make sure the NN processes all of them.
  bm.flushBlockOps();
  assertFalse(bm.getDatanodeManager().getDatanode(reg).isRegistered());
  // Ensure blockReport from dead datanode is rejected with IOException
  StorageBlockReport[] report = { new StorageBlockReport(
      new DatanodeStorage(reg.getDatanodeUuid()), BlockListAsLongs.EMPTY) };
  try {
    dnp.blockReport(reg, poolId, report,
        new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
    fail("Expected IOException is not thrown");
  } catch (IOException ex) {
    // Expected
  }
  // Ensure heartbeat from dead datanode is rejected with a command
  // that asks datanode to register again
  StorageReport[] rep = { new StorageReport(
      new DatanodeStorage(reg.getDatanodeUuid()), false, 0, 0, 0, 0, 0) };
  DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, rep, 0L, 0L, 0, 0, 0, null, true,
      SlowPeerReports.EMPTY_REPORT).getCommands();
  assertEquals(1, cmd.length);
  assertEquals(RegisterCommand.REGISTER.getAction(), cmd[0].getAction());
}
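The last two assertions expect the NameNode to answer a heartbeat from an unregistered node with exactly one command whose action asks the DataNode to register again. Below is a minimal sketch of that check, assuming JUnit 4; the class and helper names are illustrative, and DatanodeProtocol.DNA_REGISTER is the action code carried by RegisterCommand.REGISTER.

import static org.junit.Assert.assertEquals;

import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

public class HeartbeatReplyCheckSketch {

  /** Asserts the heartbeat reply contains a single re-registration command. */
  static void assertReRegisterRequested(DatanodeCommand[] cmds) {
    assertEquals(1, cmds.length);
    // RegisterCommand.REGISTER carries the DNA_REGISTER action code.
    assertEquals(DatanodeProtocol.DNA_REGISTER, cmds[0].getAction());
  }
}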
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache.
The class TestBlockRecovery, method testZeroLenReplicas.
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 60000)
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED))
      .when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
  for (RecoveringBlock rBlock : initRecoveringBlocks()) {
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask = spy(RecoveryTaskContiguous);
    spyTask.recover();
  }
  DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(block, RECOVERY_ID, 0, true, true,
      DatanodeID.EMPTY_ARRAY, null);
}
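This test stubs initReplicaRecovery on a spied DataNode with doReturn(...).when(spyDN), which records the stubbing without invoking the real method. A minimal, self-contained sketch of that Mockito idiom follows, using a hypothetical Worker class in place of the real DataNode; only the Mockito calls themselves are the real API.

import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.spy;

public class SpyStubbingSketch {

  static class Worker {
    long recover() {
      return replicaLength();
    }
    long replicaLength() {
      return 128L; // stands in for real on-disk state
    }
  }

  public static void main(String[] args) {
    Worker spyWorker = spy(new Worker());
    // doReturn(...).when(spy) stubs without calling the real replicaLength(),
    // unlike when(spy.replicaLength()).thenReturn(...), which would invoke it.
    doReturn(0L).when(spyWorker).replicaLength();
    System.out.println(spyWorker.recover()); // prints 0
  }
}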
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol in project hadoop by apache.
The class TestBlockRecovery, method testNotMatchedReplicaID.
/**
 * BlockRecoveryFI_11. a replica's recovery id does not match new GS.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 60000)
public void testNotMatchedReplicaID() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  ReplicaInPipeline replicaInfo =
      dn.data.createRbw(StorageType.DEFAULT, block, false).getReplica();
  ReplicaOutputStreams streams = null;
  try {
    streams = replicaInfo.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    streams.getChecksumOut().write('a');
    dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID + 1));
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    try {
      RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
      fail("Sync should fail");
    } catch (IOException e) {
      assertTrue(e.getMessage().startsWith("Cannot recover "));
    }
    DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
    verify(namenode, never()).commitBlockSynchronization(any(ExtendedBlock.class),
        anyLong(), anyLong(), anyBoolean(), anyBoolean(),
        any(DatanodeID[].class), any(String[].class));
  } finally {
    // streams stays null if createStreams() throws, so guard before closing.
    if (streams != null) {
      streams.close();
    }
  }
}
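Both catch blocks in these TestBlockRecovery snippets assert that the sync failure message starts with "Cannot recover ". An alternative, assuming org.apache.hadoop.test.GenericTestUtils is available on the test classpath, is to assert on the exception message with a helper that reports the full text on mismatch; the class and method names below are illustrative.

import java.io.IOException;

import org.apache.hadoop.test.GenericTestUtils;

public class RecoveryFailureCheckSketch {

  /** Fails with a descriptive message unless e's text contains "Cannot recover ". */
  static void assertCannotRecover(IOException e) {
    GenericTestUtils.assertExceptionContains("Cannot recover ", e);
  }
}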