use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
the class BlockReportTestBase method blockReport_01.
/**
 * Tests writing a file, verifying it, and closing it. Then the lengths of the
 * blocks are messed up and a block report is forced.
 * The modification of the blocks' lengths has to be ignored.
 *
 * @throws java.io.IOException on an error
 */
@Test(timeout = 300000)
public void blockReport_01() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  ArrayList<Block> blocks = prepareForRide(filePath, METHOD_NAME, FILE_SIZE);
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of blocks allocated " + blocks.size());
  }
  long[] oldLengths = new long[blocks.size()];
  int tempLen;
  for (int i = 0; i < blocks.size(); i++) {
    Block b = blocks.get(i);
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block " + b.getBlockName() + " before\t" + "Size " + b.getNumBytes());
    }
    oldLengths[i] = b.getNumBytes();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Setting new length");
    }
    tempLen = rand.nextInt(BLOCK_SIZE);
    b.set(b.getBlockId(), tempLen, b.getGenerationStamp());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Block " + b.getBlockName() + " after\t " + "Size " + b.getNumBytes());
    }
  }
  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  List<LocatedBlock> blocksAfterReport = DFSTestUtil.getAllBlocks(fs.open(filePath));
  if (LOG.isDebugEnabled()) {
    LOG.debug("After mods: Number of blocks allocated " + blocksAfterReport.size());
  }
  for (int i = 0; i < blocksAfterReport.size(); i++) {
    ExtendedBlock b = blocksAfterReport.get(i).getBlock();
    assertEquals("Length of " + i + "th block is incorrect", oldLengths[i], b.getNumBytes());
  }
}
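The sendBlockReports helper used above is not shown here; in BlockReportTestBase it is supplied by subclasses. A minimal sketch of one plausible implementation, assuming the NameNode RPC shown further down this page (cluster.getNameNodeRpc().blockReport(...) in TestBlockHasMultipleReplicasOnSameDN), would send all storages as a single combined report:

// Hedged sketch, not the actual subclass: report all storages in one RPC.
@Override
protected void sendBlockReports(DatanodeRegistration dnR, String poolId,
    StorageBlockReport[] reports) throws IOException {
  cluster.getNameNodeRpc().blockReport(dnR, poolId, reports,
      new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
}

A per-storage variant would instead loop over the reports array and issue one RPC per StorageBlockReport.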
use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
the class InternalDataNodeTestUtils method startDNWithMockNN.
/**
 * Starts an instance of DataNode with the NN mocked. The caller should ensure
 * that the DN is shut down afterwards.
 *
 * @throws IOException
 */
public static DataNode startDNWithMockNN(Configuration conf,
    final InetSocketAddress nnSocketAddr, final String dnDataDir)
    throws IOException {
  FileSystem.setDefaultUri(conf,
      "hdfs://" + nnSocketAddr.getHostName() + ":" + nnSocketAddr.getPort());
  ArrayList<StorageLocation> locations = new ArrayList<StorageLocation>();
  File dataDir = new File(dnDataDir);
  FileUtil.fullyDelete(dataDir);
  dataDir.mkdirs();
  StorageLocation location = StorageLocation.parse(dataDir.getPath());
  locations.add(location);
  final DatanodeProtocolClientSideTranslatorPB namenode =
      mock(DatanodeProtocolClientSideTranslatorPB.class);
  Mockito.doAnswer(new Answer<DatanodeRegistration>() {

    @Override
    public DatanodeRegistration answer(InvocationOnMock invocation)
        throws Throwable {
      return (DatanodeRegistration) invocation.getArguments()[0];
    }
  }).when(namenode).registerDatanode(Mockito.any(DatanodeRegistration.class));
  when(namenode.versionRequest()).thenReturn(
      new NamespaceInfo(1, TEST_CLUSTER_ID, TEST_POOL_ID, 1L));
  when(namenode.sendHeartbeat(Mockito.any(DatanodeRegistration.class),
      Mockito.any(StorageReport[].class), Mockito.anyLong(),
      Mockito.anyLong(), Mockito.anyInt(), Mockito.anyInt(),
      Mockito.anyInt(), Mockito.any(VolumeFailureSummary.class),
      Mockito.anyBoolean(), Mockito.any(SlowPeerReports.class)))
      .thenReturn(new HeartbeatResponse(new DatanodeCommand[0],
          new NNHAStatusHeartbeat(HAServiceState.ACTIVE, 1), null,
          ThreadLocalRandom.current().nextLong() | 1L));
  DataNode dn = new DataNode(conf, locations, null, null) {

    @Override
    DatanodeProtocolClientSideTranslatorPB connectToNN(
        InetSocketAddress nnAddr) throws IOException {
      Assert.assertEquals(nnSocketAddr, nnAddr);
      return namenode;
    }
  };
  // Trigger a heartbeat so that it acknowledges the NN as active.
  dn.getAllBpOs().get(0).triggerHeartbeatForTests();
  return dn;
}
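Since the caller owns the DataNode's lifecycle, a typical call wraps the returned DN in try/finally. A hypothetical usage sketch (the address, port, and data directory below are illustrative, not from the original):

Configuration conf = new HdfsConfiguration();
InetSocketAddress nnAddr = new InetSocketAddress("localhost", 8020);
DataNode dn = InternalDataNodeTestUtils.startDNWithMockNN(
    conf, nnAddr, "/tmp/mock-nn-dn-data");
try {
  // ... exercise the DataNode against the mocked NameNode ...
} finally {
  dn.shutdown(); // the caller must shut the DN down
}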
use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
the class BlockReportTestBase method blockReport_02.
/**
 * Tests writing a file, verifying it, and closing it. Then a couple of random
 * blocks are removed and a block report is forced; the FSNamesystem is pushed
 * to recalculate the required DN activities, such as replication and so on.
 * The number of missing and under-replicated blocks should be the same in
 * the case of a single-DN cluster.
 *
 * @throws IOException in case of errors
 */
@Test(timeout = 300000)
public void blockReport_02() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  LOG.info("Running test " + METHOD_NAME);
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  // mock around with newly created blocks and delete some
  File dataDir = new File(cluster.getDataDirectory());
  assertTrue(dataDir.isDirectory());
  List<ExtendedBlock> blocks2Remove = new ArrayList<ExtendedBlock>();
  List<Integer> removedIndex = new ArrayList<Integer>();
  List<LocatedBlock> lBlocks =
      cluster.getNameNodeRpc().getBlockLocations(filePath.toString(),
          FILE_START, FILE_SIZE).getLocatedBlocks();
  while (removedIndex.size() != 2) {
    int newRemoveIndex = rand.nextInt(lBlocks.size());
    if (!removedIndex.contains(newRemoveIndex))
      removedIndex.add(newRemoveIndex);
  }
  for (Integer aRemovedIndex : removedIndex) {
    blocks2Remove.add(lBlocks.get(aRemovedIndex).getBlock());
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Number of blocks allocated " + lBlocks.size());
  }
  final DataNode dn0 = cluster.getDataNodes().get(DN_N0);
  for (ExtendedBlock b : blocks2Remove) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Removing the block " + b.getBlockName());
    }
    for (File f : findAllFiles(dataDir,
        new MyFileFilter(b.getBlockName(), true))) {
      DataNodeTestUtils.getFSDataset(dn0).unfinalizeBlock(b);
      if (!f.delete()) {
        LOG.warn("Couldn't delete " + b.getBlockName());
      } else {
        LOG.debug("Deleted file " + f.toString());
      }
    }
  }
  waitTil(TimeUnit.SECONDS.toMillis(DN_RESCAN_EXTRA_WAIT));
  // all blocks belong to the same file, hence same BP
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn0.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn0, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  BlockManagerTestUtil.getComputedDatanodeWork(
      cluster.getNamesystem().getBlockManager());
  printStats();
  assertEquals("Wrong number of MissingBlocks is found",
      blocks2Remove.size(), cluster.getNamesystem().getMissingBlocksCount());
  assertEquals("Wrong number of UnderReplicatedBlocks is found",
      blocks2Remove.size(), cluster.getNamesystem().getUnderReplicatedBlocks());
}
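The MyFileFilter helper used above to locate the on-disk block files is another private member of the test base that is not shown on this page. A plausible sketch, assuming it matches either by prefix (so the block file and its ".meta" companion are both caught) or by exact name:

// Hedged sketch: prefix match picks up both "blk_<id>" and "blk_<id>_<gs>.meta".
private static class MyFileFilter implements FilenameFilter {
  private final String nameToAccept;
  private final boolean all;

  MyFileFilter(String nameToAccept, boolean all) {
    this.nameToAccept = nameToAccept;
    this.all = all;
  }

  @Override
  public boolean accept(File dir, String name) {
    return all ? name.startsWith(nameToAccept) : name.equals(nameToAccept);
  }
}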
use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
the class BlockReportTestBase method blockReport_06.
/**
 * Test creates a file and closes it.
 * A second datanode is then started in the cluster.
 * As soon as the replication process is completed, the test forces a
 * block report and checks that no under-replicated blocks are left.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 300000)
public void blockReport_06() throws Exception {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;
  writeFile(METHOD_NAME, FILE_SIZE, filePath);
  startDNandWait(filePath, true);
  // all blocks belong to the same file, hence same BP
  DataNode dn = cluster.getDataNodes().get(DN_N1);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();
  assertEquals("Wrong number of PendingReplication Blocks",
      0, cluster.getNamesystem().getUnderReplicatedBlocks());
}
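The startDNandWait helper brings the second DataNode into the running mini-cluster and, when asked, waits for the file's blocks to be replicated to it. A minimal sketch under stated assumptions (that the test base keeps a MiniDFSCluster in "cluster", its Configuration in "conf", and the FileSystem in "fs"; the actual helper may wait differently):

protected void startDNandWait(Path filePath, boolean waitReplication)
    throws Exception {
  // Bring up one additional DataNode in the running mini-cluster.
  cluster.startDataNodes(conf, 1, true, null, null);
  cluster.waitClusterUp();
  if (waitReplication) {
    // Block until every block of the file reaches the new replica count.
    DFSTestUtil.waitReplication(fs, filePath,
        (short) cluster.getDataNodes().size());
  }
}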
use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
the class TestBlockHasMultipleReplicasOnSameDN method testBlockHasMultipleReplicasOnSameDN.
/**
* Verify NameNode behavior when a given DN reports multiple replicas
* of a given block.
*/
@Test
public void testBlockHasMultipleReplicasOnSameDN() throws IOException {
  String filename = makeFileName(GenericTestUtils.getMethodName());
  Path filePath = new Path(filename);
  // Write out a file with a few blocks.
  DFSTestUtil.createFile(fs, filePath, BLOCK_SIZE, BLOCK_SIZE * NUM_BLOCKS,
      BLOCK_SIZE, NUM_DATANODES, seed);
  // Get the block list for the file with the block locations.
  LocatedBlocks locatedBlocks =
      client.getLocatedBlocks(filePath.toString(), 0, BLOCK_SIZE * NUM_BLOCKS);
  // Generate a fake block report from one of the DataNodes, such
  // that it reports one copy of each block on either storage.
  DataNode dn = cluster.getDataNodes().get(0);
  DatanodeRegistration dnReg = dn.getDNRegistrationForBP(bpid);
  StorageBlockReport[] reports =
      new StorageBlockReport[cluster.getStoragesPerDatanode()];
  ArrayList<ReplicaInfo> blocks = new ArrayList<>();
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    Block localBlock = locatedBlock.getBlock().getLocalBlock();
    blocks.add(new FinalizedReplica(localBlock, null, null));
  }
  Collections.sort(blocks);
  try (FsDatasetSpi.FsVolumeReferences volumes =
      dn.getFSDataset().getFsVolumeReferences()) {
    BlockListAsLongs bll = BlockListAsLongs.encode(blocks);
    for (int i = 0; i < cluster.getStoragesPerDatanode(); ++i) {
      DatanodeStorage dns = new DatanodeStorage(volumes.get(i).getStorageID());
      reports[i] = new StorageBlockReport(dns, bll);
    }
  }
  // Should not assert!
  cluster.getNameNodeRpc().blockReport(dnReg, bpid, reports,
      new BlockReportContext(1, 0, System.nanoTime(), 0L, true));
  // Get the block locations once again.
  locatedBlocks = client.getLocatedBlocks(filename, 0, BLOCK_SIZE * NUM_BLOCKS);
  // Make sure that each block has two replicas, one on each DataNode.
  for (LocatedBlock locatedBlock : locatedBlocks.getLocatedBlocks()) {
    DatanodeInfo[] locations = locatedBlock.getLocations();
    assertThat(locations.length, is((int) NUM_DATANODES));
    assertThat(locations[0].getDatanodeUuid(), not(locations[1].getDatanodeUuid()));
  }
}
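A note on the design: every storage in the fake report deliberately carries the same encoded block list (bll), which is what makes each block appear on more than one storage of the single DataNode. Reading the BlockReportContext arguments by position, they appear to describe one combined RPC (total RPCs = 1, current RPC index = 0), a nanosecond timestamp as the report ID, a zero lease ID, and a flag that the report is sorted; these per-argument names are inferred here, not stated in the snippet.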