Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
The class TestFsDatasetImpl, method testDeletingBlocks.
@Test
public void testDeletingBlocks() throws IOException {
  HdfsConfiguration conf = new HdfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();
    DataNode dn = cluster.getDataNodes().get(0);
    FsDatasetSpi<?> ds = DataNodeTestUtils.getFSDataset(dn);
    ds.addBlockPool(BLOCKPOOL, conf);
    FsVolumeImpl vol;
    try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
      vol = (FsVolumeImpl) volumes.get(0);
    }
    ExtendedBlock eb;
    List<Block> blockList = new ArrayList<>();
    for (int i = 1; i <= 63; i++) {
      eb = new ExtendedBlock(BLOCKPOOL, i, 1, 1000 + i);
      cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
      blockList.add(eb.getLocalBlock());
    }
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertTrue(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
    blockList.clear();
    eb = new ExtendedBlock(BLOCKPOOL, 64, 1, 1064);
    cluster.getFsDatasetTestUtils(0).createFinalizedReplica(eb);
    blockList.add(eb.getLocalBlock());
    ds.invalidate(BLOCKPOOL, blockList.toArray(new Block[0]));
    try {
      Thread.sleep(1000);
    } catch (InterruptedException e) {
      // Nothing to do
    }
    assertFalse(ds.isDeletingBlock(BLOCKPOOL, blockList.get(0).getBlockId()));
  } finally {
    cluster.shutdown();
  }
}
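The test leans on the split between ExtendedBlock and Block: the former carries the block pool id, the latter is the pool-local triple of id, length, and generation stamp, which is why getLocalBlock() is used to build the list passed to invalidate(). A minimal sketch of that relationship, using only the constructors and accessors already shown above (the literal values are illustrative):

ExtendedBlock eb = new ExtendedBlock("BP-example", 42L, 1L, 1042L);
// getLocalBlock() drops the block pool id but keeps the pool-local fields.
Block local = eb.getLocalBlock();
assert local.getBlockId() == 42L;
assert local.getNumBytes() == 1L;
assert local.getGenerationStamp() == 1042L;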
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
The class FsDatasetImplTestUtils, method createRBW.
@Override
public Replica createRBW(FsVolumeSpi volume, ExtendedBlock eb) throws IOException {
  FsVolumeImpl vol = (FsVolumeImpl) volume;
  final String bpid = eb.getBlockPoolId();
  final Block block = eb.getLocalBlock();
  ReplicaBeingWritten rbw = new ReplicaBeingWritten(block, volume,
      vol.createRbwFile(bpid, block).getParentFile(), null);
  rbw.getBlockFile().createNewFile();
  rbw.getMetaFile().createNewFile();
  dataset.volumeMap.add(bpid, rbw);
  return rbw;
}
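Because this is the FsDatasetTestUtils implementation backing FsDatasetImpl, test code reaches it through the cluster rather than calling it directly. A hedged usage sketch, reusing the cluster, ds, and BLOCKPOOL setup from the first snippet:

try (FsDatasetSpi.FsVolumeReferences volumes = ds.getFsVolumeReferences()) {
  FsVolumeSpi volume = volumes.get(0);
  // Creates the on-disk block and meta files for an RBW replica and
  // registers the replica in the dataset's volume map, per the override above.
  Replica rbw = cluster.getFsDatasetTestUtils(0)
      .createRBW(volume, new ExtendedBlock(BLOCKPOOL, 100, 0, 1100));
}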
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
The class TestSimulatedFSDataset, method testInjectionEmpty.
@Test
public void testInjectionEmpty() throws IOException {
  SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(0, blockReport.getNumberOfBlocks());
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
  // Inject the blocks we got above into an empty fsdataset.
  SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
  sfsdataset.injectBlocks(bpid, blockReport);
  blockReport = sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    assertEquals(blockIdToLen(b.getBlockId()),
        sfsdataset.getLength(new ExtendedBlock(bpid, b)));
  }
  assertEquals(bytesAdded, sfsdataset.getDfsUsed());
  assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
}
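Both loops depend on blockIdToLen, a helper of the test class that is not shown here; the assertions only require that it maps a block id to a deterministic length that addSomeBlocks also used when writing the block. A minimal sketch of such a helper, with an assumed multiplier (the constant is illustrative, not necessarily the one in the Hadoop source):

// Illustrative helper: any deterministic id-to-length mapping shared with
// addSomeBlocks satisfies the assertions above.
static long blockIdToLen(long blockId) {
  final long BLOCK_LENGTH_MULTIPLIER = 79; // assumed constant
  return blockId * BLOCK_LENGTH_MULTIPLIER;
}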
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
The class TestSimulatedFSDataset, method testInvalidate.
@Test
public void testInvalidate() throws IOException {
  final SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  int bytesAdded = addSomeBlocks(fsdataset);
  Block[] deleteBlocks = new Block[2];
  deleteBlocks[0] = new Block(1, 0, 0);
  deleteBlocks[1] = new Block(2, 0, 0);
  fsdataset.invalidate(bpid, deleteBlocks);
  checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[0]));
  checkInvalidBlock(new ExtendedBlock(bpid, deleteBlocks[1]));
  long sizeDeleted = blockIdToLen(1) + blockIdToLen(2);
  assertEquals(bytesAdded - sizeDeleted, fsdataset.getDfsUsed());
  assertEquals(fsdataset.getCapacity() - bytesAdded + sizeDeleted,
      fsdataset.getRemaining());
  // Now make sure the rest of the blocks are valid.
  for (int i = 3; i <= NUMBLOCKS; ++i) {
    Block b = new Block(i, 0, 0);
    assertTrue(fsdataset.isValidBlock(new ExtendedBlock(bpid, b)));
  }
}
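checkInvalidBlock is another helper of the test class that is not shown here. Whatever else it verifies, it must assert the inverse of the isValidBlock() check in the loop above; a sketch of that minimal check (the real helper's signature and extra assertions may differ):

// Sketch only: an invalidated block must no longer be reported as valid.
static void checkInvalidBlockSketch(SimulatedFSDataset fsdataset, ExtendedBlock b) {
  assertFalse(fsdataset.isValidBlock(b));
}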
Use of org.apache.hadoop.hdfs.protocol.Block in project hadoop by apache.
The class TestSimulatedFSDataset, method testInjectionNonEmpty.
@Test
public void testInjectionNonEmpty() throws IOException {
  SimulatedFSDataset fsdataset = getSimulatedFSDataset();
  BlockListAsLongs blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(0, blockReport.getNumberOfBlocks());
  int bytesAdded = addSomeBlocks(fsdataset);
  blockReport = fsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
  }
  fsdataset = null;
  // Inject the blocks we got above into a non-empty fsdataset.
  SimulatedFSDataset sfsdataset = getSimulatedFSDataset();
  // Add some blocks whose block ids do not conflict with
  // the ones we are going to inject.
  bytesAdded += addSomeBlocks(sfsdataset, NUMBLOCKS + 1, false);
  sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS, blockReport.getNumberOfBlocks());
  sfsdataset.injectBlocks(bpid, blockReport);
  blockReport = sfsdataset.getBlockReport(bpid);
  assertEquals(NUMBLOCKS * 2, blockReport.getNumberOfBlocks());
  for (Block b : blockReport) {
    assertNotNull(b);
    assertEquals(blockIdToLen(b.getBlockId()), b.getNumBytes());
    assertEquals(blockIdToLen(b.getBlockId()),
        sfsdataset.getLength(new ExtendedBlock(bpid, b)));
  }
  assertEquals(bytesAdded, sfsdataset.getDfsUsed());
  assertEquals(sfsdataset.getCapacity() - bytesAdded, sfsdataset.getRemaining());
  // Now test that the dataset cannot be created if it does not have
  // sufficient capacity.
  conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);
  try {
    sfsdataset = getSimulatedFSDataset();
    sfsdataset.addBlockPool(bpid, conf);
    sfsdataset.injectBlocks(bpid, blockReport);
    fail("Expected an IO exception");
  } catch (IOException e) {
    // ok - as expected
  }
}
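The final try/catch works because SimulatedFSDataset enforces the configured capacity when blocks are injected. A short sketch of the relevant configuration, assuming a fresh conf object; CONFIG_PROPERTY_CAPACITY appears in the test above, and setFactory() is the usual way to route FsDataset creation to the simulated implementation:

HdfsConfiguration conf = new HdfsConfiguration();
// Route dataset creation to SimulatedFSDataset for this configuration.
SimulatedFSDataset.setFactory(conf);
// Cap capacity far below the bytes in the injected block report so that
// injectBlocks(bpid, blockReport) must fail with an IOException.
conf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY, 10);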