Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class TestReplicaMap, method testRemove:
@Test
public void testRemove() {
  // Test 1: a null block argument throws IllegalArgumentException
  try {
    map.remove(bpid, null);
    fail("Expected exception not thrown");
  } catch (IllegalArgumentException expected) {
  }
  // Test 2: remove failure - generation stamp mismatch
  Block b = new Block(block);
  b.setGenerationStamp(0);
  assertNull(map.remove(bpid, b));
  // Test 3: remove failure - blockID mismatch
  b.setGenerationStamp(block.getGenerationStamp());
  b.setBlockId(0);
  assertNull(map.remove(bpid, b));
  // Test 4: remove success
  assertNotNull(map.remove(bpid, block));
  // Test 5: remove failure - invalid blockID
  assertNull(map.remove(bpid, 0));
  // Test 6: remove success after re-adding the replica
  map.add(bpid, new FinalizedReplica(block, null, null));
  assertNotNull(map.remove(bpid, block.getBlockId()));
}
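The fields map, bpid, and block come from the class fixture, which this snippet does not show. A minimal sketch of that setup, assuming the AutoCloseableLock-based ReplicaMap constructor (the constructor has varied across Hadoop versions, and the actual values in TestReplicaMap may differ):

// Hypothetical fixture; field names mirror those used in testRemove above.
private final ReplicaMap map = new ReplicaMap(new AutoCloseableLock());
private final String bpid = "BP-TEST";
private final Block block = new Block(1234, 1234, 1234); // id, numBytes, genStamp

@Before
public void setup() {
  // Each test starts with one finalized replica registered under bpid.
  map.add(bpid, new FinalizedReplica(block, null, null));
}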
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class FsDatasetImplTestUtils, method injectCorruptReplica:
@Override
public void injectCorruptReplica(ExtendedBlock block) throws IOException {
  Preconditions.checkState(!dataset.contains(block),
      "Block " + block + " already exists on dataset.");
  try (FsVolumeReferences volRef = dataset.getFsVolumeReferences()) {
    FsVolumeImpl volume = (FsVolumeImpl) volRef.get(0);
    FinalizedReplica finalized = new FinalizedReplica(block.getLocalBlock(),
        volume, volume.getFinalizedDir(block.getBlockPoolId()));
    File blockFile = finalized.getBlockFile();
    if (!blockFile.createNewFile()) {
      throw new FileExistsException(
          "Block file " + blockFile + " already exists.");
    }
    File metaFile = FsDatasetUtil.getMetaFile(blockFile, 1000);
    if (!metaFile.createNewFile()) {
      throw new FileExistsException(
          "Meta file " + metaFile + " already exists.");
    }
    dataset.volumeMap.add(block.getBlockPoolId(), finalized);
  }
}
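A hedged usage sketch for the helper above; cluster, the DataNode index, and the block id are illustrative, and it assumes a test with a running MiniDFSCluster exposing getFsDatasetTestUtils:

// Hypothetical usage: plant a corrupt (zero-length) replica on DataNode 0.
FsDatasetTestUtils utils = cluster.getFsDatasetTestUtils(0);
ExtendedBlock block =
    new ExtendedBlock(cluster.getNamesystem().getBlockPoolId(), 1000L);
utils.injectCorruptReplica(block);
// Both the block file and its meta file now exist but are empty, so the
// replica is in the volume map yet fails any checksum-verified read.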
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class FsDatasetImplTestUtils, method createFinalizedReplica:
@Override
public Replica createFinalizedReplica(FsVolumeSpi volume, ExtendedBlock block)
    throws IOException {
  FsVolumeImpl vol = (FsVolumeImpl) volume;
  FinalizedReplica info = new FinalizedReplica(block.getLocalBlock(), vol,
      vol.getCurrentDir().getParentFile());
  dataset.volumeMap.add(block.getBlockPoolId(), info);
  info.getBlockFile().createNewFile();
  info.getMetaFile().createNewFile();
  saveMetaFileHeader(info.getMetaFile());
  return info;
}
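saveMetaFileHeader is a private helper not shown in this snippet. A plausible sketch, assuming its job is just to write a well-formed BlockMetadataHeader so the meta file parses; the CRC32 type and 512-byte bytes-per-checksum are assumptions:

private void saveMetaFileHeader(File metaFile) throws IOException {
  try (DataOutputStream metaOut =
      new DataOutputStream(new FileOutputStream(metaFile))) {
    // Write the standard meta-file header: version plus checksum descriptor.
    BlockMetadataHeader.writeHeader(metaOut,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
  }
}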
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class FsDatasetImplTestUtils, method createReplicaUnderRecovery:
@Override
public Replica createReplicaUnderRecovery(ExtendedBlock block, long recoveryId)
    throws IOException {
  try (FsVolumeReferences volumes = dataset.getFsVolumeReferences()) {
    FsVolumeImpl volume = (FsVolumeImpl) volumes.get(0);
    ReplicaUnderRecovery rur = new ReplicaUnderRecovery(
        new FinalizedReplica(block.getLocalBlock(), volume,
            volume.getCurrentDir().getParentFile()),
        recoveryId);
    dataset.volumeMap.add(block.getBlockPoolId(), rur);
    return rur;
  }
}
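Note that the ReplicaUnderRecovery simply wraps a finalized replica together with the recovery id. A hedged usage sketch (utils is hypothetical; the recovery id must be newer than the block's generation stamp for a later recovery to supersede it):

// Hypothetical usage: wrap an existing block for recovery.
long recoveryId = block.getGenerationStamp() + 1; // newer than the genstamp
Replica rur = utils.createReplicaUnderRecovery(block, recoveryId);
assertEquals(ReplicaState.RUR, rur.getState()); // reports the RUR state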
Use of org.apache.hadoop.hdfs.server.datanode.FinalizedReplica in project hadoop by apache.
The class TestBlockListAsLongs, method testMix:
@Test
public void testMix() {
  BlockListAsLongs blocks = checkReport(
      new FinalizedReplica(b1, null, null),
      new FinalizedReplica(b2, null, null),
      new ReplicaBeingWritten(b3, null, null, null),
      new ReplicaWaitingToBeRecovered(b4, null, null));
  assertArrayEquals(
      new long[] { 2, 2, 1, 11, 111, 2, 22, 222, -1, -1, -1,
          3, 33, 333, ReplicaState.RBW.getValue(),
          4, 44, 444, ReplicaState.RWR.getValue() },
      blocks.getBlockListAsLongs());
}
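The expected array follows the legacy long[] block-report encoding: a two-long header with the finalized and under-construction counts, finalized blocks as (blockId, numBytes, genStamp) triples, a (-1, -1, -1) delimiter, then under-construction blocks as quadruples that append the replica state. Broken out:

// { 2, 2,                 header: 2 finalized, 2 under construction
//   1, 11, 111,           finalized b1: blockId, numBytes, genStamp
//   2, 22, 222,           finalized b2
//   -1, -1, -1,           delimiter between the two sections
//   3, 33, 333, RBW,      under-construction b3, replica state appended
//   4, 44, 444, RWR }     under-construction b4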