Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
In class TestReplication, the method assertNoReplicationWasPerformed:
private void assertNoReplicationWasPerformed(MiniDFSCluster cluster) {
  for (DataNode dn : cluster.getDataNodes()) {
    MetricsRecordBuilder rb = getMetrics(dn.getMetrics().name());
    assertCounter("BlocksReplicated", 0L, rb);
  }
}
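The getMetrics and assertCounter calls above are static helpers from Hadoop's test metrics utilities, and the "BlocksReplicated" name is the DataNode metrics counter. A minimal sketch of the imports this helper assumes (verify against the Hadoop version in use):

// Imports assumed by assertNoReplicationWasPerformed; a sketch based on
// org.apache.hadoop.test.MetricsAsserts, not copied from the original file.
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;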
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
In class TestReplication, the method testNoExtraReplicationWhenBlockReceivedIsLate:
/**
 * This test makes sure that, when a file is closed before all
 * of the datanodes in the pipeline have reported their replicas,
 * the NameNode doesn't consider the block under-replicated too
 * aggressively. It is a regression test for HDFS-1172.
 */
@Test(timeout = 60000)
public void testNoExtraReplicationWhenBlockReceivedIsLate() throws Exception {
  LOG.info("Test block replication when blockReceived is late");
  final short numDataNodes = 3;
  final short replication = 3;
  final Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024);
  final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
  final String testFile = "/replication-test-file";
  final Path testPath = new Path(testFile);
  final BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
  try {
    cluster.waitActive();
    // Artificially delay the incremental block report (IBR) from one DataNode.
    // This ensures that the client's completeFile() RPC reaches the NN before
    // some of the replicas are reported.
    NameNode nn = cluster.getNameNode();
    DataNode dn = cluster.getDataNodes().get(0);
    DatanodeProtocolClientSideTranslatorPB spy = InternalDataNodeTestUtils.spyOnBposToNN(dn, nn);
    DelayAnswer delayer = new GenericTestUtils.DelayAnswer(LOG);
    Mockito.doAnswer(delayer).when(spy).blockReceivedAndDeleted(
        Mockito.<DatanodeRegistration>anyObject(),
        Mockito.anyString(),
        Mockito.<StorageReceivedDeletedBlocks[]>anyObject());
    FileSystem fs = cluster.getFileSystem();
    // Create and close a small file with two blocks
    DFSTestUtil.createFile(fs, testPath, 1500, replication, 0);
    // Schedule replication via BlockManager#computeReplicationWork
    BlockManagerTestUtil.computeAllPendingWork(bm);
    // Initially there should be some pending replication, since the close()
    // happens before at least one of the reportReceivedDeletedBlocks calls.
    assertTrue(pendingReplicationCount(bm) > 0);
    // Release the pending IBR.
    delayer.waitForCall();
    delayer.proceed();
    delayer.waitForResult();
    // Make sure the DataNodes perform any replication work they were assigned.
    for (DataNode d : cluster.getDataNodes()) {
      DataNodeTestUtils.triggerHeartbeat(d);
    }
    // Wait until there is nothing pending
    try {
      GenericTestUtils.waitFor(new Supplier<Boolean>() {

        @Override
        public Boolean get() {
          return pendingReplicationCount(bm) == 0;
        }
      }, 100, 3000);
    } catch (TimeoutException e) {
      fail("timed out while waiting for no pending replication.");
    }
    // Check that none of the datanodes have serviced a replication request,
    // i.e. that the NameNode didn't schedule any spurious replication.
    assertNoReplicationWasPerformed(cluster);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
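The test calls a pendingReplicationCount helper that is not shown in this snippet. A minimal sketch of what such a helper looks like, assuming it refreshes the BlockManager's counters via BlockManagerTestUtil#updateState and then reads the pending-replication count; note that the getter is named getPendingReconstructionBlocksCount on newer Hadoop branches and getPendingReplicationBlocksCount on older ones:

// Sketch of the helper used above (not part of this snippet): refresh the
// BlockManager's replication state counters, then return how many blocks are
// currently queued as pending replication/reconstruction work.
private long pendingReplicationCount(BlockManager bm) {
  BlockManagerTestUtil.updateState(bm);
  return bm.getPendingReconstructionBlocksCount();
}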
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
In class TestReplication, the method testReplicationWhenBlockCorruption:
/**
 * Test that blocks get re-replicated when some replicas are corrupted,
 * as long as the number of good replicas is at least the configured
 * minimum replication.
 *
 * Corrupt replicas are simulated by injecting dummy copies, and the
 * DataNodes are then restarted so that the corrupted blocks are detected
 * as soon as possible.
 */
@Test(timeout = 30000)
public void testReplicationWhenBlockCorruption() throws Exception {
  MiniDFSCluster cluster = null;
  try {
    Configuration conf = new HdfsConfiguration();
    conf.setLong(DFSConfigKeys.DFS_NAMENODE_RECONSTRUCTION_PENDING_TIMEOUT_SEC_KEY, 1);
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).storagesPerDatanode(1).build();
    FileSystem fs = cluster.getFileSystem();
    Path filePath = new Path("/test");
    FSDataOutputStream create = fs.create(filePath);
    fs.setReplication(filePath, (short) 1);
    create.write(new byte[1024]);
    create.close();
    ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath);
    int numReplicaCreated = 0;
    for (final DataNode dn : cluster.getDataNodes()) {
      if (!dn.getFSDataset().contains(block)) {
        cluster.getFsDatasetTestUtils(dn).injectCorruptReplica(block);
        numReplicaCreated++;
      }
    }
    assertEquals(2, numReplicaCreated);
    fs.setReplication(filePath, (short) 3);
    // Restart the DataNodes so they detect the injected dummy (corrupt)
    // replicas as soon as possible.
    cluster.restartDataNodes();
    cluster.waitActive();
    cluster.triggerBlockReports();
    DFSTestUtil.waitReplication(fs, filePath, (short) 3);
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
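For reference, the replica-counting loop above can be factored into a small helper. This is only an illustrative sketch, not part of TestReplication, and it uses nothing beyond the getFSDataset().contains call already shown in the test:

// Illustrative helper (not in the original test): count how many DataNodes in
// the mini cluster currently hold a copy of the given block on disk.
private static int countReplicasOnDisk(MiniDFSCluster cluster, ExtendedBlock block) {
  int count = 0;
  for (DataNode dn : cluster.getDataNodes()) {
    if (dn.getFSDataset().contains(block)) {
      count++;
    }
  }
  return count;
}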
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
In class TestRollingUpgrade, the method testDFSAdminDatanodeUpgradeControlCommands:
@Test
public void testDFSAdminDatanodeUpgradeControlCommands() throws Exception {
  // start a cluster
  final Configuration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
    cluster.waitActive();
    final DFSAdmin dfsadmin = new DFSAdmin(conf);
    DataNode dn = cluster.getDataNodes().get(0);
    // check the datanode
    final String dnAddr = dn.getDatanodeId().getIpcAddr(false);
    final String[] args1 = { "-getDatanodeInfo", dnAddr };
    runCmd(dfsadmin, true, args1);
    // issue shutdown to the datanode.
    final String[] args2 = { "-shutdownDatanode", dnAddr, "upgrade" };
    runCmd(dfsadmin, true, args2);
    // the datanode should be down.
    GenericTestUtils.waitForThreadTermination("Async datanode shutdown thread", 100, 10000);
    Assert.assertFalse("DataNode should exit", dn.isDatanodeUp());
    // ping should fail.
    assertEquals(-1, dfsadmin.run(args1));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
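The runCmd helper used above is defined elsewhere in TestRollingUpgrade and is not part of this snippet. A minimal sketch of what it likely does, assuming it simply checks the DFSAdmin exit code against the expected outcome:

// Sketch of the runCmd helper referenced above: run the DFSAdmin command and
// assert on its exit code (0 for expected success, non-zero otherwise).
private static void runCmd(DFSAdmin dfsadmin, boolean success, String... args) throws Exception {
  if (success) {
    Assert.assertEquals(0, dfsadmin.run(args));
  } else {
    Assert.assertTrue(dfsadmin.run(args) != 0);
  }
}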
Use of org.apache.hadoop.hdfs.server.datanode.DataNode in project hadoop by apache.
In class TestFsDatasetImpl, the method testDeletingBlocks:
@Test
public void testDeletingBlocks() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
    ds.addBlockPool(BLOCKPOOL, conf);
    FsVolumeImpl vol;
    try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
      vol = (FsVolumeImpl) volumes.get(0);
    }
    ExtendedBlock eb;
    ReplicaInfo info;
    List<Block> blockList = new ArrayList<>();
    for (int i = 1; i <= 63; i++) {
      eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
      blockList.add(eb.getLocalBlock());
    }
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    blockList.clear();
    eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
    cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    blockList.add(eb.getLocalBlock());
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
  } finally {
    cluster.shutdown();
  }
}
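The BLOCKPOOL constant is a class-level field of TestFsDatasetImpl that is not shown in this snippet. A sketch of the kind of definition it relies on; the literal value below is only an assumption for illustration:

// Block-pool id used throughout the test; the value here is a placeholder
// assumption, not necessarily the literal used in TestFsDatasetImpl.
private static final String BLOCKPOOL = "BP-TEST";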