Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From class TestWriteToReplica, method testClose:
// test close
@Test
public void testClose() throws Exception {
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> dataSet = DataNodeTestUtils.getFSDataset(dn);
    // set up replicasMap
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ExtendedBlock[] blocks = setup(bpid, cluster.getFsDatasetTestUtils(dn));
    // test close
    testClose(dataSet, blocks);
  } finally {
    cluster.shutdown();
  }
}
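The setup helper called above is not part of this excerpt. In TestWriteToReplica it registers one replica per replica state so that testClose(dataSet, blocks) can exercise every case. A minimal sketch of that idea, with illustrative block ids, lengths, and generation stamps (the real helper differs in details):

private ExtendedBlock[] setup(String bpid, FsDatasetTestUtils testUtils) throws IOException {
  // One block per replica state; ids, lengths, and gen stamps are illustrative.
  ExtendedBlock[] blocks = new ExtendedBlock[6];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new ExtendedBlock(bpid, 1000 + i, 1, 2000 + i);
  }
  testUtils.createFinalizedReplica(blocks[0]);            // FINALIZED
  testUtils.createReplicaInPipeline(blocks[1]);           // TEMPORARY
  testUtils.createRBW(blocks[2]);                         // RBW
  testUtils.createReplicaWaitingToBeRecovered(blocks[3]); // RWR
  testUtils.createReplicaUnderRecovery(blocks[4], 2007);  // RUR
  // blocks[5] deliberately gets no replica, covering the non-existent case.
  return blocks;
}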
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From class TestInterDatanodeProtocol, method checkMetaInfo:
public static void checkMetaInfo(ExtendedBlock b, DataNode dn) throws IOException {
  Block metainfo = DataNodeTestUtils.getFSDataset(dn)
      .getStoredBlock(b.getBlockPoolId(), b.getBlockId());
  Assert.assertEquals(b.getBlockId(), metainfo.getBlockId());
  Assert.assertEquals(b.getNumBytes(), metainfo.getNumBytes());
}
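checkMetaInfo compares a client-side ExtendedBlock against the block the datanode actually stores. A hypothetical call site (variable names assumed, not from the original test) verifies every datanode holding the last block of a file:

// Hypothetical usage: check each datanode that holds the last block of /foo.
LocatedBlock lb = getLastLocatedBlock(
    DFSClientAdapter.getDFSClient(dfs).getNamenode(), "/foo");
for (DatanodeInfo info : lb.getLocations()) {
  DataNode dn = cluster.getDataNode(info.getIpcPort());
  checkMetaInfo(lb.getBlock(), dn);
}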
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From class TestInterDatanodeProtocol, method testUpdateReplicaUnderRecovery:
/**
 * Test for
 * {@link FsDatasetImpl#updateReplicaUnderRecovery(ExtendedBlock, long, long, long)}
 */
@Test
public void testUpdateReplicaUnderRecovery() throws IOException {
  MiniDFSCluster cluster = null;
  try {
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
    cluster.waitActive();
    // create a file
    DistributedFileSystem dfs = cluster.getFileSystem();
    String filestr = "/foo";
    Path filepath = new Path(filestr);
    DFSTestUtil.createFile(dfs, filepath, 1024L, (short) 3, 0L);
    // get block info
    final LocatedBlock locatedblock = getLastLocatedBlock(
        DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
    final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
    Assert.assertTrue(datanodeinfo.length > 0);
    // get DataNode and FSDataset objects
    final DataNode datanode = cluster.getDataNode(datanodeinfo[0].getIpcPort());
    Assert.assertTrue(datanode != null);
    // initReplicaRecovery
    final ExtendedBlock b = locatedblock.getBlock();
    final long recoveryid = b.getGenerationStamp() + 1;
    final long newlength = b.getNumBytes() - 1;
    final FsDatasetSpi<?> fsdataset = DataNodeTestUtils.getFSDataset(datanode);
    final ReplicaRecoveryInfo rri =
        fsdataset.initReplicaRecovery(new RecoveringBlock(b, null, recoveryid));
    // check replica
    final Replica replica =
        cluster.getFsDatasetTestUtils(datanode).fetchReplica(b);
    Assert.assertEquals(ReplicaState.RUR, replica.getState());
    // check meta data before update
    cluster.getFsDatasetTestUtils(datanode).checkStoredReplica(replica);
    // case "THIS IS NOT SUPPOSED TO HAPPEN"
    // with (block length) != (stored replica's on disk length).
    {
      // create a block with same id and gs but different length.
      final ExtendedBlock tmp = new ExtendedBlock(b.getBlockPoolId(),
          rri.getBlockId(), rri.getNumBytes() - 1, rri.getGenerationStamp());
      try {
        // update should fail
        fsdataset.updateReplicaUnderRecovery(tmp, recoveryid,
            tmp.getBlockId(), newlength);
        Assert.fail();
      } catch (IOException ioe) {
        System.out.println("GOOD: getting " + ioe);
      }
    }
    // update
    final Replica r = fsdataset.updateReplicaUnderRecovery(
        new ExtendedBlock(b.getBlockPoolId(), rri), recoveryid,
        rri.getBlockId(), newlength);
    assertTrue(r != null);
    assertTrue(r.getStorageUuid() != null);
  } finally {
    if (cluster != null)
      cluster.shutdown();
  }
}
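The getLastLocatedBlock helper used above is omitted from this excerpt. Assuming it simply asks the namenode for all of the file's block locations and returns the last one, a sketch looks like this:

public static LocatedBlock getLastLocatedBlock(ClientProtocol namenode, String src)
    throws IOException {
  // Fetch every located block of the file and return the last one.
  List<LocatedBlock> blocks =
      namenode.getBlockLocations(src, 0, Long.MAX_VALUE).getLocatedBlocks();
  return blocks.get(blocks.size() - 1);
}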
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From class TestNameNodeMetadataConsistency, method testGenerationStampInFuture:
/**
 * This test creates a file and modifies the block generation stamp to a
 * number that the name node has not seen yet. It then asserts that the
 * name node moves into safe mode while it is in startup mode.
 */
@Test
public void testGenerationStampInFuture() throws Exception {
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  OutputStream ostream = fs.create(filePath1);
  ostream.write(TEST_DATA_IN_FUTURE.getBytes());
  ostream.close();
  // Re-write the Generation Stamp to a Generation Stamp in future.
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath1);
  final long genStamp = block.getGenerationStamp();
  final int datanodeIndex = 0;
  cluster.changeGenStampOfBlock(datanodeIndex, block, genStamp + 1);
  // stop the data node so that it won't remove block
  final DataNodeProperties dnProps = cluster.stopDataNode(datanodeIndex);
  // Simulate Namenode forgetting a Block
  cluster.restartNameNode(true);
  cluster.getNameNode().getNamesystem().writeLock();
  BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
      .getStoredBlock(block.getLocalBlock());
  bInfo.delete();
  cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
  cluster.getNameNode().getNamesystem().writeUnlock();
  // we also need to tell block manager that we are in the startup path
  BlockManagerTestUtil.setStartupSafeModeForTest(
      cluster.getNameNode().getNamesystem().getBlockManager());
  cluster.restartDataNode(dnProps);
  waitForNumBytes(TEST_DATA_IN_FUTURE.length());
  // Make sure that we find all written bytes in future block
  assertEquals(TEST_DATA_IN_FUTURE.length(),
      cluster.getNameNode().getBytesWithFutureGenerationStamps());
  // Assert safemode reason
  assertTrue(cluster.getNameNode().getNamesystem().getSafeModeTip()
      .contains("Name node detected blocks with generation stamps in future"));
}
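Both tests in this class depend on fixture fields (cluster, filePath1, filePath2, TEST_DATA_IN_FUTURE) and a waitForNumBytes helper that the excerpt omits. A minimal sketch, assuming waitForNumBytes polls the namenode counter via GenericTestUtils.waitFor; the field values here are illustrative, not the original test's:

// Illustrative fixture; names match the tests, values are assumptions.
private static final String TEST_DATA_IN_FUTURE = "This is test data in future";
private final Path filePath1 = new Path("/test1");
private final Path filePath2 = new Path("/test2");
private MiniDFSCluster cluster;

private void waitForNumBytes(final long expectedBytes) throws Exception {
  // Poll until the namenode reports the expected number of bytes with
  // future generation stamps, or time out after 30 seconds.
  GenericTestUtils.waitFor(new Supplier<Boolean>() {
    @Override
    public Boolean get() {
      return cluster.getNameNode().getBytesWithFutureGenerationStamps()
          == expectedBytes;
    }
  }, 100, 30000);
}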
Use of org.apache.hadoop.hdfs.protocol.ExtendedBlock in project hadoop by apache.
From class TestNameNodeMetadataConsistency, method testEnsureGenStampsIsStartupOnly:
/**
 * Pretty much the same test as above, but it does not enable startup safe
 * mode, hence we should not see a positive count of blocks with future
 * generation stamps.
 */
@Test
public void testEnsureGenStampsIsStartupOnly() throws Exception {
  String testData = " This is test data";
  cluster.restartDataNodes();
  cluster.restartNameNodes();
  cluster.waitActive();
  FileSystem fs = cluster.getFileSystem();
  OutputStream ostream = fs.create(filePath2);
  ostream.write(testData.getBytes());
  ostream.close();
  ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, filePath2);
  long genStamp = block.getGenerationStamp();
  // Re-write the Generation Stamp to a Generation Stamp in future.
  cluster.changeGenStampOfBlock(0, block, genStamp + 1);
  MiniDFSCluster.DataNodeProperties dnProps = cluster.stopDataNode(0);
  // Simulate Namenode forgetting a Block
  cluster.restartNameNode(true);
  BlockInfo bInfo = cluster.getNameNode().getNamesystem().getBlockManager()
      .getStoredBlock(block.getLocalBlock());
  cluster.getNameNode().getNamesystem().writeLock();
  bInfo.delete();
  cluster.getNameNode().getNamesystem().getBlockManager().removeBlock(bInfo);
  cluster.getNameNode().getNamesystem().writeUnlock();
  cluster.restartDataNode(dnProps);
  waitForNumBytes(0);
  // Make sure that there are no bytes in future since isInStartupSafe
  // mode is not true.
  assertEquals(0, cluster.getNameNode().getBytesWithFutureGenerationStamps());
}