Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
From class TestDatanodeRegistration, method testRegistrationWithDifferentSoftwareVersions:
@Test
public void testRegistrationWithDifferentSoftwareVersions() throws Exception {
  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY, "3.0.0");
  conf.set(DFSConfigKeys.DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY, "3.0.0");
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
    NamenodeProtocols rpcServer = cluster.getNameNodeRpc();
    long nnCTime = cluster.getNamesystem().getFSImage().getStorage().getCTime();
    StorageInfo mockStorageInfo = mock(StorageInfo.class);
    doReturn(nnCTime).when(mockStorageInfo).getCTime();
    DatanodeRegistration mockDnReg = mock(DatanodeRegistration.class);
    doReturn(HdfsServerConstants.DATANODE_LAYOUT_VERSION).when(mockDnReg).getVersion();
    doReturn("127.0.0.1").when(mockDnReg).getIpAddr();
    doReturn(123).when(mockDnReg).getXferPort();
    doReturn("fake-storage-id").when(mockDnReg).getDatanodeUuid();
    doReturn(mockStorageInfo).when(mockDnReg).getStorageInfo();
    // Should succeed when software versions are the same.
    doReturn("3.0.0").when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);
    // Should succeed when the software version of the DN is above the minimum required by the NN.
    doReturn("4.0.0").when(mockDnReg).getSoftwareVersion();
    rpcServer.registerDatanode(mockDnReg);
    // Should fail when the software version of the DN is below the minimum required by the NN.
    doReturn("2.0.0").when(mockDnReg).getSoftwareVersion();
    try {
      rpcServer.registerDatanode(mockDnReg);
      fail("Should not have been able to register DN with too-low version.");
    } catch (IncorrectVersionException ive) {
      GenericTestUtils.assertExceptionContains("The reported DataNode version is too low", ive);
      LOG.info("Got expected exception", ive);
    }
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
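The failure path above hinges on the NameNode comparing the DataNode's reported software version against the configured minimum. A minimal sketch of that gate, assuming (as the exception message suggests) it is backed by org.apache.hadoop.util.VersionUtil.compareVersions; the helper name checkSoftwareVersion and the exact message format are illustrative, not copied from DatanodeManager:

import org.apache.hadoop.util.VersionUtil;

// Illustrative version gate, assumed to mirror the NN-side check.
static void checkSoftwareVersion(String dnVersion, String minimumDnVersion)
    throws IncorrectVersionException {
  // compareVersions returns a negative value when the first version is lower.
  if (VersionUtil.compareVersions(dnVersion, minimumDnVersion) < 0) {
    throw new IncorrectVersionException("The reported DataNode version is too low: "
        + dnVersion + ", minimum required: " + minimumDnVersion);
  }
}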
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
From class TestBlockRecovery, method initBlockRecords:
private List<BlockRecord> initBlockRecords(DataNode spyDN) throws IOException {
  List<BlockRecord> blocks = new ArrayList<BlockRecord>(1);
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(block.getBlockPoolId());
  BlockRecord blockRecord = new BlockRecord(
      new DatanodeID(dnR), spyDN,
      new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
          block.getGenerationStamp(), ReplicaState.FINALIZED));
  blocks.add(blockRecord);
  return blocks;
}
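For context, ReplicaRecoveryInfo extends Block and carries the replica state that block recovery needs. A self-contained sketch of constructing and reading one; the numeric values here are made up for illustration:

import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;

// Arguments: block id, bytes on disk, generation stamp, replica state.
ReplicaRecoveryInfo rri = new ReplicaRecoveryInfo(1000L, 512L, 1L, ReplicaState.FINALIZED);
// It extends Block, so the usual Block accessors apply.
assert rri.getBlockId() == 1000L;
assert rri.getNumBytes() == 512L;
assert rri.getOriginalReplicaState() == ReplicaState.FINALIZED;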
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
From class BlockReportTestBase, method blockReport_09:
// Similar to blockReport_08, but corrupts the GS and length of the TEMPORARY
// replica's block. Expect the same behaviour: the NN should simply ignore
// this block.
@Test(timeout = 300000)
public void blockReport_09() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  final int DN_N1 = DN_N0 + 1;
  final int bytesChkSum = 1024 * 1000;
  conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, bytesChkSum);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 6 * bytesChkSum);
  shutDownCluster();
  startUpCluster();
  try {
    writeFile(METHOD_NAME, 12 * bytesChkSum, filePath);
    Block bl = findBlock(filePath, 12 * bytesChkSum);
    BlockChecker bc = new BlockChecker(filePath);
    bc.start();
    waitForTempReplica(bl, DN_N1);
    // All blocks belong to the same file, hence the same block pool.
    DataNode dn = cluster.getDataNodes().get(DN_N1);
    String poolId = cluster.getNamesystem().getBlockPoolId();
    DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
    StorageBlockReport[] reports = getBlockReports(dn, poolId, true, true);
    sendBlockReports(dnR, poolId, reports);
    printStats();
    assertEquals("Wrong number of PendingReplication blocks",
        2, cluster.getNamesystem().getPendingReplicationBlocks());
    try {
      bc.join();
    } catch (InterruptedException e) {
      // Ignored: the checker thread's work is covered by the assertion above.
    }
  } finally {
    // Return the configuration to its initial state.
    resetConfiguration();
  }
}
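sendBlockReports is a helper of this test base; it presumably wraps the DatanodeProtocol.blockReport RPC. A hedged sketch of the direct call; note that the BlockReportContext constructor arity varies across Hadoop releases, so the five-argument form used here is an assumption:

// Direct form of the RPC that sendBlockReports is assumed to wrap.
// BlockReportContext(totalRpcs, curRpc, reportId, leaseId, sorted) is the
// shape in recent branches; older branches take fewer arguments.
DatanodeCommand cmd = cluster.getNameNodeRpc().blockReport(
    dnR, poolId, reports,
    new BlockReportContext(1, 0, System.nanoTime(), 0L, false));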
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
From class BlockReportTestBase, method blockReport_03:
/**
 * Writes a file and closes it.
 * A block report is then generated with a bad GS for a single block.
 * The block report is forced and the number of corrupt blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 300000)
public void blockReport_03() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  writeFile(METHOD_NAME, FILE_SIZE, filePath);
  // All blocks belong to the same file, hence the same block pool.
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  String poolId = cluster.getNamesystem().getBlockPoolId();
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, true, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();
  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(1L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(0L));
}
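The corrupt-replica counter asserted above is, to our understanding, backed by the BlockManager; an equivalent check through that layer, sketched under the assumption that the getCorruptReplicaBlocksCount accessor is available:

// Same counter, read through the BlockManager instead of the FSNamesystem metric.
long corrupt = cluster.getNamesystem().getBlockManager().getCorruptReplicaBlocksCount();
assertThat("Wrong number of corrupt blocks", corrupt, is(1L));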
Use of org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration in project hadoop by apache.
From class BlockReportTestBase, method blockReport_04:
/**
 * Writes a file and closes it.
 * A block report is then generated with one extra (bogus) block.
 * The block report is forced and the number of pending-deletion blocks is checked.
 *
 * @throws IOException in case of an error
 */
@Test(timeout = 300000)
public void blockReport_04() throws IOException {
  final String METHOD_NAME = GenericTestUtils.getMethodName();
  Path filePath = new Path("/" + METHOD_NAME + ".dat");
  DFSTestUtil.createFile(fs, filePath, FILE_SIZE, REPL_FACTOR, rand.nextLong());
  DataNode dn = cluster.getDataNodes().get(DN_N0);
  // All blocks belong to the same file, hence the same block pool.
  String poolId = cluster.getNamesystem().getBlockPoolId();
  // Create a bogus new block which will not be present on the namenode.
  ExtendedBlock b = new ExtendedBlock(poolId, rand.nextLong(), 1024L, rand.nextLong());
  dn.getFSDataset().createRbw(StorageType.DEFAULT, b, false);
  DatanodeRegistration dnR = dn.getDNRegistrationForBP(poolId);
  StorageBlockReport[] reports = getBlockReports(dn, poolId, false, false);
  sendBlockReports(dnR, poolId, reports);
  printStats();
  assertThat("Wrong number of corrupt blocks",
      cluster.getNamesystem().getCorruptReplicaBlocks(), is(0L));
  assertThat("Wrong number of PendingDeletion blocks",
      cluster.getNamesystem().getPendingDeletionBlocks(), is(1L));
}
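A possible follow-up, sketched under the assumption that MiniDFSCluster's trigger helpers are available in the branch under test: once the NameNode has counted the bogus replica as pending deletion, nudging heartbeats and deletion reports lets the DataNode act on the invalidate command without waiting for the configured intervals.

// Advance the DN/NN exchange instead of waiting for heartbeat intervals.
cluster.triggerHeartbeats();      // DN picks up the pending invalidate command
cluster.triggerDeletionReports(); // DN reports the deletion back to the NN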