Example usage of org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl in the Apache Ozone project: class TestBlockDeletingService, method testBlockDeletion.
@Test
public void testBlockDeletion() throws Exception {
  // Limit each deletion interval to 2 blocks, so the 3 blocks created below
  // require two intervals to be fully removed.
  DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
  dnConf.setBlockDeletionLimit(2);
  this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
  conf.setFromObject(dnConf);
  ContainerSet containerSet = new ContainerSet();
  // 1 container, 3 blocks pending deletion, 1 chunk per block.
  createToDeleteBlocks(containerSet, 1, 3, 1);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> {
  });
  BlockDeletingServiceTestImpl svc = getBlockDeletingService(containerSet, conf, keyValueHandler);
  svc.start();
  try {
    GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
    // Ensure 1 container was created
    List<ContainerData> containerData = Lists.newArrayList();
    containerSet.listContainer(0L, 1, containerData);
    // Assert the size BEFORE get(0) so an empty list fails the assertion
    // instead of throwing IndexOutOfBoundsException.
    Assert.assertEquals(1, containerData.size());
    KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
    try (ReferenceCountedDB meta = BlockUtils.getDB(data, conf)) {
      Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
      // NOTE: this test assumes that all the containers are KeyValueContainers
      // and have DeleteTransactionId in KeyValueContainerData. If other
      // types are going to be added, this test should be checked.
      long transactionId = ((KeyValueContainerData) containerMap.get(data.getContainerID()).getContainerData()).getDeleteTransactionId();
      long containerSpace = data.getBytesUsed();
      // Number of deleted blocks in container should be equal to 0 before
      // block delete
      Assert.assertEquals(0, transactionId);
      // Ensure there are 3 blocks under deletion and 0 deleted blocks
      Assert.assertEquals(3, getUnderDeletionBlocksCount(meta, data));
      Assert.assertEquals(3, meta.getStore().getMetadataTable().get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
      // Container contains 3 blocks. So, space used by the container
      // should be greater than zero.
      Assert.assertTrue(containerSpace > 0);
      // An interval will delete 1 * 2 blocks
      deleteAndWait(svc, 1);
      GenericTestUtils.waitFor(() -> data.getBytesUsed() == containerSpace / 3, 100, 3000);
      // After the first interval 2 blocks are deleted. Hence, current space
      // used by the container should be less than the space used by the
      // container initially (before running deletion services).
      Assert.assertTrue(data.getBytesUsed() < containerSpace);
      deleteAndWait(svc, 2);
      // After deletion of all 3 blocks, space used by the containers
      // should be zero.
      GenericTestUtils.waitFor(() -> data.getBytesUsed() == 0, 100, 3000);
      // Check finally DB counters.
      // Not checking bytes used, as handler is a mock call.
      Assert.assertEquals(0, meta.getStore().getMetadataTable().get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
      Assert.assertEquals(0, meta.getStore().getMetadataTable().get(OzoneConsts.BLOCK_COUNT).longValue());
    }
  } finally {
    // Always stop the background service, even when an assertion above fails.
    svc.shutdown();
  }
}
Example usage of org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl in the Apache Ozone project: class TestBlockDeletingService, method testShutdownService.
@Test
// waitFor => assertion with timeout
@SuppressWarnings("java:S2699")
public void testShutdownService() throws Exception {
  conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500, TimeUnit.MILLISECONDS);
  conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
  conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 10);
  // A single container holding 100 blocks pending deletion.
  ContainerSet containers = new ContainerSet();
  createToDeleteBlocks(containers, 1, 100, 1);
  ContainerMetrics containerMetrics = ContainerMetrics.create(conf);
  KeyValueHandler handler = new KeyValueHandler(conf, datanodeUuid, containers, volumeSet, containerMetrics, c -> {
  });
  BlockDeletingServiceTestImpl svc = getBlockDeletingService(containers, conf, handler);
  svc.start();
  GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
  // Kick off deletion work and confirm worker threads are alive.
  svc.runDeletingTasks();
  GenericTestUtils.waitFor(() -> svc.getThreadCount() > 0, 100, 1000);
  // Shut the service down and verify every worker thread terminates.
  svc.shutdown();
  GenericTestUtils.waitFor(() -> svc.getThreadCount() == 0, 100, 1000);
}
Example usage of org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl in the Apache Ozone project: class TestSchemaOneBackwardsCompatibility, method runBlockDeletingService.
/**
 * Starts a block deleting service over a mock OzoneContainer, waits for a
 * single deletion pass to complete, then stops the service.
 *
 * @param keyValueHandler handler backing the mock container
 * @throws Exception if the service fails to start or process in time
 */
private void runBlockDeletingService(KeyValueHandler keyValueHandler) throws Exception {
  // Allow up to 10 containers per interval, but only 2 blocks per container.
  conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
  conf.setInt(OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
  OzoneContainer container = makeMockOzoneContainer(keyValueHandler);
  BlockDeletingServiceTestImpl service = new BlockDeletingServiceTestImpl(container, 1000, conf);
  service.start();
  try {
    GenericTestUtils.waitFor(service::isStarted, 100, 3000);
    service.runDeletingTasks();
    // Wait until exactly one deletion pass has been processed.
    GenericTestUtils.waitFor(() -> service.getTimesOfProcessed() == 1, 100, 3000);
  } finally {
    // Stop the service so its worker threads do not leak into other tests
    // (the original version left it running).
    service.shutdown();
  }
}
Example usage of org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl in the Apache Ozone project: class TestBlockDeletingService, method testContainerThrottle.
@Test(timeout = 30000)
@org.junit.Ignore
public void testContainerThrottle() throws Exception {
  // Properties :
  //  - Number of containers : 2
  //  - Number of blocks per container : 1
  //  - Number of chunks per block : 10
  //  - Container limit per interval : 1
  //  - Block limit per container : 1
  //
  // Each time only 1 container can be processed, so each time
  // 1 block from 1 container can be deleted.
  // Process 1 container per interval
  conf.set(ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY, TopNOrderedContainerDeletionChoosingPolicy.class.getName());
  DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
  dnConf.setBlockDeletionLimit(1);
  this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
  conf.setFromObject(dnConf);
  ContainerSet containerSet = new ContainerSet();
  int containerCount = 2;
  int chunksPerBlock = 10;
  int blocksPerContainer = 1;
  createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, chunksPerBlock);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> {
  });
  BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler);
  service.start();
  try {
    // Listing moved inside the try block so service.shutdown() still runs
    // if listContainer throws.
    List<ContainerData> containerData = Lists.newArrayList();
    containerSet.listContainer(0L, containerCount, containerData);
    GenericTestUtils.waitFor(service::isStarted, 100, 3000);
    // Deleting one of the two containers and its single block.
    // Hence, space used by the container of whose block has been
    // deleted should be zero.
    deleteAndWait(service, 1);
    GenericTestUtils.waitFor(() -> (containerData.get(0).getBytesUsed() == 0 || containerData.get(1).getBytesUsed() == 0), 100, 3000);
    // Exactly one container may be empty after the first interval, not both.
    Assert.assertFalse((containerData.get(0).getBytesUsed() == 0) && (containerData.get(1).getBytesUsed() == 0));
    // Deleting the second container. Hence, space used by both the
    // containers should be zero.
    deleteAndWait(service, 2);
    GenericTestUtils.waitFor(() -> (containerData.get(0).getBytesUsed() == 0 && containerData.get(1).getBytesUsed() == 0), 100, 3000);
  } finally {
    service.shutdown();
  }
}
Example usage of org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl in the Apache Ozone project: class TestBlockDeletingService, method testBlockThrottle.
@Test(timeout = 30000)
public void testBlockThrottle() throws Exception {
  // Properties :
  //  - Number of containers : 5
  //  - Number of blocks per container : 3
  //  - Number of chunks per block : 1
  //  - Container limit per interval : 10
  //  - Block limit per container : 2
  //
  // Each time containers can be all scanned, but only 10 blocks
  // can be actually deleted. So it requires 2 waves
  // to cleanup all the 15 blocks.
  DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
  dnConf.setBlockDeletionLimit(10);
  this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
  conf.setFromObject(dnConf);
  ContainerSet containerSet = new ContainerSet();
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> {
  });
  int containerCount = 5;
  int blocksPerContainer = 3;
  createToDeleteBlocks(containerSet, containerCount, blocksPerContainer, 1);
  BlockDeletingServiceTestImpl service = getBlockDeletingService(containerSet, conf, keyValueHandler);
  service.start();
  try {
    // Listing and space computations moved inside the try block so
    // service.shutdown() still runs if any of them throw.
    List<ContainerData> containerData = Lists.newArrayList();
    containerSet.listContainer(0L, containerCount, containerData);
    long blockSpace = containerData.get(0).getBytesUsed() / blocksPerContainer;
    long totalContainerSpace = containerCount * containerData.get(0).getBytesUsed();
    GenericTestUtils.waitFor(service::isStarted, 100, 3000);
    // Total blocks = 3 * 5 = 15
    // blockLimitPerInterval = 10
    // each interval will at most runDeletingTasks = 10 blocks
    // but as per of deletion policy (random/topNorder), it will fetch all 3
    // blocks from first 3 containers and 1 block from last container.
    // C1 - 3 BLOCKS, C2 - 3 BLOCKS, C3 - 3 BLOCKS, C4 - 1 BLOCK
    // Deleted space of 10 blocks should be equal to (initial total space
    // of container - current total space of container).
    deleteAndWait(service, 1);
    GenericTestUtils.waitFor(() -> blockLimitPerInterval * blockSpace == (totalContainerSpace - currentBlockSpace(containerData, containerCount)), 100, 3000);
    // There is only 5 blocks left to runDeletingTasks
    // (Deleted space of previous 10 blocks + these left 5 blocks) should
    // be equal to (initial total space of container
    // - current total space of container(it will be zero as all blocks
    // in all the containers are deleted)).
    deleteAndWait(service, 2);
    long totalContainerBlocks = blocksPerContainer * containerCount;
    GenericTestUtils.waitFor(() -> totalContainerBlocks * blockSpace == (totalContainerSpace - currentBlockSpace(containerData, containerCount)), 100, 3000);
  } finally {
    service.shutdown();
  }
}
Aggregations