Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
From the class TestInterDatanodeProtocol, method testInitReplicaRecovery.
/**
 * Test
 * {@link FsDatasetImpl#initReplicaRecovery(String, ReplicaMap, Block, long, long)}.
 */
@Test
public void testInitReplicaRecovery() throws IOException {
  final long firstblockid = 10000L;
  final long gs = 7777L;
  final long length = 22L;
  final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
  String bpid = "BP-TEST";
  final Block[] blocks = new Block[5];
  for (int i = 0; i < blocks.length; i++) {
    blocks[i] = new Block(firstblockid + i, length, gs);
    map.add(bpid, createReplicaInfo(blocks[i]));
  }
  {
    // normal case
    final Block b = blocks[0];
    final ReplicaInfo originalInfo = map.get(bpid, b);
    final long recoveryid = gs + 1;
    final ReplicaRecoveryInfo recoveryInfo = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo);
    final ReplicaUnderRecovery updatedInfo = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo.getBlockId());
    Assert.assertEquals(recoveryid, updatedInfo.getRecoveryID());
    // recover one more time
    final long recoveryid2 = gs + 2;
    final ReplicaRecoveryInfo recoveryInfo2 = FsDatasetImpl.initReplicaRecovery(
        bpid, map, blocks[0], recoveryid2,
        DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    assertEquals(originalInfo, recoveryInfo2);
    final ReplicaUnderRecovery updatedInfo2 = (ReplicaUnderRecovery) map.get(bpid, b);
    Assert.assertEquals(originalInfo.getBlockId(), updatedInfo2.getBlockId());
    Assert.assertEquals(recoveryid2, updatedInfo2.getRecoveryID());
    // case RecoveryInProgressException
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (RecoveryInProgressException ripe) {
      System.out.println("GOOD: getting " + ripe);
    }
  }
  {
    // BlockRecoveryFI_01: replica not found
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid - 1, length, gs);
    ReplicaRecoveryInfo r = FsDatasetImpl.initReplicaRecovery(bpid, map, b,
        recoveryid, DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
    Assert.assertNull("Data-node should not have this replica.", r);
  }
  {
    // BlockRecoveryFI_02: "THIS IS NOT SUPPOSED TO HAPPEN" with recovery id < gs
    final long recoveryid = gs - 1;
    final Block b = new Block(firstblockid + 1, length, gs);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      Assert.fail();
    } catch (IOException ioe) {
      System.out.println("GOOD: getting " + ioe);
    }
  }
  // BlockRecoveryFI_03: Replica's gs is less than the block's gs
  {
    final long recoveryid = gs + 1;
    final Block b = new Block(firstblockid, length, gs + 1);
    try {
      FsDatasetImpl.initReplicaRecovery(bpid, map, b, recoveryid,
          DFSConfigKeys.DFS_DATANODE_XCEIVER_STOP_TIMEOUT_MILLIS_DEFAULT);
      fail("InitReplicaRecovery should fail because replica's "
          + "gs is less than the block's gs");
    } catch (IOException e) {
      // verify the failure message points at the stale replica generation stamp
      Assert.assertTrue(e.getMessage().startsWith(
          "replica.getGenerationStamp() < block.getGenerationStamp(), block="));
    }
  }
}
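The createReplicaInfo helper called in the setup loop is not part of this snippet. A minimal sketch of what such a helper might look like, reusing the FinalizedReplica constructor shown in the TestFsDatasetImpl snippet below (argument order assumed to be blockId, length, generation stamp, volume, directory; the actual helper in TestInterDatanodeProtocol may differ):

private static ReplicaInfo createReplicaInfo(Block b) {
  // A finalized replica carrying only block id, length and generation stamp;
  // volume and directory are intentionally left null for this in-memory sketch.
  return new FinalizedReplica(b.getBlockId(), b.getNumBytes(),
      b.getGenerationStamp(), null, null);
}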
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
From the class TestDatanodeRestart, method testRbwReplicas.
private void testRbwReplicas(MiniDFSCluster cluster, boolean isCorrupt) throws IOException {
  FSDataOutputStream out = null;
  FileSystem fs = cluster.getFileSystem();
  final Path src = new Path("/test.txt");
  try {
    final int fileLen = 515;
    // create some rbw replicas on disk
    byte[] writeBuf = new byte[fileLen];
    new Random().nextBytes(writeBuf);
    out = fs.create(src);
    out.write(writeBuf);
    out.hflush();
    DataNode dn = cluster.getDataNodes().get(0);
    try (FsDatasetSpi.FsVolumeReferences volumes =
        dataset(dn).getFsVolumeReferences()) {
      for (FsVolumeSpi vol : volumes) {
        final FsVolumeImpl volume = (FsVolumeImpl) vol;
        File currentDir = volume.getCurrentDir().getParentFile().getParentFile();
        File rbwDir = new File(currentDir, "rbw");
        for (File file : rbwDir.listFiles()) {
          if (isCorrupt && Block.isBlockFilename(file)) {
            // corrupt the block file by truncating it by one byte
            new RandomAccessFile(file, "rw").setLength(fileLen - 1);
          }
        }
      }
    }
    cluster.restartDataNodes();
    cluster.waitActive();
    dn = cluster.getDataNodes().get(0);
    // check volumeMap: one rwr replica
    String bpid = cluster.getNamesystem().getBlockPoolId();
    ReplicaMap replicas = dataset(dn).volumeMap;
    Assert.assertEquals(1, replicas.size(bpid));
    ReplicaInfo replica = replicas.replicas(bpid).iterator().next();
    Assert.assertEquals(ReplicaState.RWR, replica.getState());
    if (isCorrupt) {
      // the recovered replica is rounded down to the last full 512-byte checksum chunk
      Assert.assertEquals((fileLen - 1) / 512 * 512, replica.getNumBytes());
    } else {
      Assert.assertEquals(fileLen, replica.getNumBytes());
    }
    dataset(dn).invalidate(bpid, new Block[] { replica });
  } finally {
    IOUtils.closeStream(out);
    if (fs.exists(src)) {
      fs.delete(src, false);
    }
    fs.close();
  }
}
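The dataset(dn) helper is likewise not shown in the snippet. One plausible one-liner, assuming the DataNodeTestUtils test utility from the same package (the actual helper in TestDatanodeRestart may be written differently):

private static FsDatasetImpl dataset(DataNode dn) {
  // Unwrap the datanode's dataset and cast to the concrete FsDatasetImpl so the
  // test can reach fields such as volumeMap and methods such as invalidate().
  return (FsDatasetImpl) DataNodeTestUtils.getFSDataset(dn);
}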
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
From the class TestFsDatasetImpl, method testDuplicateReplicaResolution.
@Test
public void testDuplicateReplicaResolution() throws IOException {
  FsVolumeImpl fsv1 = Mockito.mock(FsVolumeImpl.class);
  FsVolumeImpl fsv2 = Mockito.mock(FsVolumeImpl.class);
  File f1 = new File("d1/block");
  File f2 = new File("d2/block");
  ReplicaInfo replicaOlder = new FinalizedReplica(1, 1, 1, fsv1, f1);
  ReplicaInfo replica = new FinalizedReplica(1, 2, 2, fsv1, f1);
  ReplicaInfo replicaSame = new FinalizedReplica(1, 2, 2, fsv1, f1);
  ReplicaInfo replicaNewer = new FinalizedReplica(1, 3, 3, fsv1, f1);
  ReplicaInfo replicaOtherOlder = new FinalizedReplica(1, 1, 1, fsv2, f2);
  ReplicaInfo replicaOtherSame = new FinalizedReplica(1, 2, 2, fsv2, f2);
  ReplicaInfo replicaOtherNewer = new FinalizedReplica(1, 3, 3, fsv2, f2);
  // equivalent path, so don't remove either
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaSame, replica));
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaOlder, replica));
  assertNull(BlockPoolSlice.selectReplicaToDelete(replicaNewer, replica));
  // keep the latest found replica
  assertSame(replica,
      BlockPoolSlice.selectReplicaToDelete(replicaOtherSame, replica));
  assertSame(replicaOtherOlder,
      BlockPoolSlice.selectReplicaToDelete(replicaOtherOlder, replica));
  assertSame(replica,
      BlockPoolSlice.selectReplicaToDelete(replicaOtherNewer, replica));
}
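The assertions above encode a resolution rule for duplicate replicas discovered during a block pool scan. The sketch below is illustrative only: pickDuplicateToDelete is a hypothetical name, and the real BlockPoolSlice logic additionally weighs on-disk length and transient storage when generation stamps tie.

static ReplicaInfo pickDuplicateToDelete(ReplicaInfo found, File foundFile,
    ReplicaInfo known, File knownFile) {
  if (foundFile.equals(knownFile)) {
    // both entries resolve to the same file on disk: delete neither
    return null;
  }
  if (found.getGenerationStamp() != known.getGenerationStamp()) {
    // the copy with the older generation stamp is the one to drop
    return found.getGenerationStamp() > known.getGenerationStamp() ? known : found;
  }
  // tie: keep the copy just discovered during the scan
  return known;
}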
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
From the class TestWriteToReplica, method testEqualityOfReplicaMap.
/**
 * Compare the replica map before and after the restart.
 */
private void testEqualityOfReplicaMap(ReplicaMap oldReplicaMap,
    ReplicaMap newReplicaMap, List<String> bpidList) {
  // Walk the replica map taken after the restart and remove each matching
  // replicaInfo from oldReplicaMap.
  for (String bpid : bpidList) {
    for (ReplicaInfo info : newReplicaMap.replicas(bpid)) {
      assertNotNull("Volume map before restart didn't contain the "
          + "blockpool: " + bpid, oldReplicaMap.replicas(bpid));
      ReplicaInfo oldReplicaInfo = oldReplicaMap.get(bpid, info.getBlockId());
      // Every block present after the restart must also have existed before it.
      assertNotNull("Old Replica Map didn't contain block with blockId: "
          + info.getBlockId(), oldReplicaInfo);
      ReplicaState oldState = oldReplicaInfo.getState();
      // After a restart, RWR, RBW and RUR replicas are all recovered as RWR.
      if (info.getState() == ReplicaState.RWR) {
        if (oldState == ReplicaState.RWR || oldState == ReplicaState.RBW
            || oldState == ReplicaState.RUR) {
          oldReplicaMap.remove(bpid, oldReplicaInfo);
        }
      } else if (info.getState() == ReplicaState.FINALIZED
          && oldState == ReplicaState.FINALIZED) {
        oldReplicaMap.remove(bpid, oldReplicaInfo);
      }
    }
  }
  // Whatever is left in the old map was not found after the restart. Only
  // TEMPORARY replicas are expected to disappear, since they are never
  // persisted; anything else means a block was lost.
  for (String bpid : bpidList) {
    for (ReplicaInfo replicaInfo : oldReplicaMap.replicas(bpid)) {
      if (replicaInfo.getState() != ReplicaState.TEMPORARY) {
        Assert.fail("After datanode restart we lost the block with blockId: "
            + replicaInfo.getBlockId());
      }
    }
  }
}
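A hedged sketch of how this comparison could be driven, pieced together from calls that already appear in the TestDatanodeRestart snippet above (the actual TestWriteToReplica test may snapshot the map differently):

// Snapshot the datanode's volume map before the restart, restart, then compare.
String bpid = cluster.getNamesystem().getBlockPoolId();
List<String> bpidList = Collections.singletonList(bpid);
DataNode dn = cluster.getDataNodes().get(0);
ReplicaMap oldReplicaMap = new ReplicaMap(new AutoCloseableLock());
for (String bp : bpidList) {
  for (ReplicaInfo info : dataset(dn).volumeMap.replicas(bp)) {
    oldReplicaMap.add(bp, info);  // copy references into the "before" map
  }
}
cluster.restartDataNodes();
cluster.waitActive();
dn = cluster.getDataNodes().get(0);
testEqualityOfReplicaMap(oldReplicaMap, dataset(dn).volumeMap, bpidList);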
Use of org.apache.hadoop.hdfs.server.datanode.ReplicaInfo in project hadoop by apache.
From the class FsDatasetImplTestUtils, method checkStoredReplica.
@Override
public void checkStoredReplica(Replica replica) throws IOException {
  Preconditions.checkArgument(replica instanceof ReplicaInfo);
  ReplicaInfo r = (ReplicaInfo) replica;
  FsDatasetImpl.checkReplicaFiles(r);
}
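FsDatasetImpl.checkReplicaFiles(r) verifies that the replica's block and meta files exist on disk and are consistent with the replica's recorded length. A hypothetical usage sketch; the getFsDatasetTestUtils accessor, the dn and bpid variables, and the surrounding loop are assumptions borrowed from the earlier snippets, not taken from this method:

FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
for (ReplicaInfo info : dataset(dn).volumeMap.replicas(bpid)) {
  // throws IOException if the block or meta file is missing or inconsistent
  utils.checkStoredReplica(info);
}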