Use of org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB in project ozone by apache.
From the class TestContainerReader, method markBlocksForDelete.
/**
 * Marks the first {@code count} blocks of {@code blockNames} as deleting by
 * re-inserting each block-data entry under the
 * {@link OzoneConsts#DELETING_KEY_PREFIX}-prefixed key.
 *
 * @param keyValueContainer container whose block database is updated
 * @param setMetaData if true, also records {@code count} under
 *                    {@link OzoneConsts#PENDING_DELETE_BLOCK_COUNT} in the
 *                    metadata table
 * @param blockNames local IDs of the container's blocks; only the first
 *                   {@code count} entries are marked
 * @param count number of blocks to mark for deletion
 * @throws Exception on database access failure
 */
private void markBlocksForDelete(KeyValueContainer keyValueContainer,
    boolean setMetaData, List<Long> blockNames, int count) throws Exception {
  try (ReferenceCountedDB metadataStore =
      BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
    // The table handle is loop-invariant; fetch it once instead of per block.
    Table<String, BlockData> blockDataTable =
        metadataStore.getStore().getBlockDataTable();
    for (int i = 0; i < count; i++) {
      String blk = Long.toString(blockNames.get(i));
      BlockData blkInfo = blockDataTable.get(blk);
      // Re-key the entry with the deleting prefix to mark it for deletion.
      blockDataTable.delete(blk);
      blockDataTable.put(OzoneConsts.DELETING_KEY_PREFIX + blk, blkInfo);
    }
    if (setMetaData) {
      // Pending delete blocks are still counted towards the block count
      // and bytes used metadata values, so those do not change.
      Table<String, Long> metadataTable =
          metadataStore.getStore().getMetadataTable();
      metadataTable.put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, (long) count);
    }
  }
}
Use of org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB in project ozone by apache.
From the class TestBlockDeletingService, method testBlockDeletion.
/**
 * Verifies that the block deleting service removes blocks in batches of the
 * configured limit (2 per interval): after the first interval only 2 of the
 * 3 blocks are gone, and after the second interval container space and the
 * DB block counters drop to zero.
 */
@Test
public void testBlockDeletion() throws Exception {
  DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
  dnConf.setBlockDeletionLimit(2);
  this.blockLimitPerInterval = dnConf.getBlockDeletionLimit();
  conf.setFromObject(dnConf);
  ContainerSet containerSet = new ContainerSet();
  createToDeleteBlocks(containerSet, 1, 3, 1);
  ContainerMetrics metrics = ContainerMetrics.create(conf);
  KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid,
      containerSet, volumeSet, metrics, c -> {
      });
  BlockDeletingServiceTestImpl svc =
      getBlockDeletingService(containerSet, conf, keyValueHandler);
  svc.start();
  GenericTestUtils.waitFor(svc::isStarted, 100, 3000);
  // Ensure 1 container was created. Check the size BEFORE dereferencing the
  // first element so a failure reports the assertion instead of an
  // IndexOutOfBoundsException.
  List<ContainerData> containerData = Lists.newArrayList();
  containerSet.listContainer(0L, 1, containerData);
  Assert.assertEquals(1, containerData.size());
  KeyValueContainerData data = (KeyValueContainerData) containerData.get(0);
  try (ReferenceCountedDB meta = BlockUtils.getDB(data, conf)) {
    Map<Long, Container<?>> containerMap = containerSet.getContainerMapCopy();
    // NOTE: this test assumes that all the containers are KeyValueContainers
    // and have DeleteTransactionId in KeyValueContainerData. If other
    // types are going to be added, this test should be checked.
    long transactionId =
        ((KeyValueContainerData) containerMap.get(data.getContainerID())
            .getContainerData()).getDeleteTransactionId();
    long containerSpace = data.getBytesUsed();
    // Number of deleted blocks in container should be equal to 0 before
    // block delete
    Assert.assertEquals(0, transactionId);
    // Ensure there are 3 blocks under deletion and 0 deleted blocks
    Assert.assertEquals(3, getUnderDeletionBlocksCount(meta, data));
    Assert.assertEquals(3, meta.getStore().getMetadataTable()
        .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
    // Container contains 3 blocks. So, space used by the container
    // should be greater than zero.
    Assert.assertTrue(containerSpace > 0);
    // An interval will delete 1 * 2 blocks
    deleteAndWait(svc, 1);
    GenericTestUtils.waitFor(
        () -> data.getBytesUsed() == containerSpace / 3, 100, 3000);
    // After first interval 2 blocks will be deleted. Hence, current space
    // used by the container should be less than the space used by the
    // container initially (before running deletion services).
    Assert.assertTrue(data.getBytesUsed() < containerSpace);
    deleteAndWait(svc, 2);
    // After deletion of all 3 blocks, space used by the containers
    // should be zero.
    GenericTestUtils.waitFor(() -> data.getBytesUsed() == 0, 100, 3000);
    // Check final DB counters.
    // Not checking bytes used, as handler is a mock call.
    Assert.assertEquals(0, meta.getStore().getMetadataTable()
        .get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT).longValue());
    Assert.assertEquals(0, meta.getStore().getMetadataTable()
        .get(OzoneConsts.BLOCK_COUNT).longValue());
  }
  svc.shutdown();
}
Use of org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB in project ozone by apache.
From the class TestSchemaOneBackwardsCompatibility, method testReadBlockData.
/**
 * Reads schema-one block data three ways — direct key lookup, a range scan
 * of unprefixed keys, and the block iterator — and checks each result
 * against the expected IDs in {@code TestDB.BLOCK_IDS}.
 */
@Test
public void testReadBlockData() throws Exception {
  try (ReferenceCountedDB db = BlockUtils.getDB(newKvData(), conf)) {
    Table<String, BlockData> blocks = db.getStore().getBlockDataTable();
    // Key encoding / value decoding: each stored entry must round-trip to
    // the same local ID that was used as its key.
    for (String id : TestDB.BLOCK_IDS) {
      BlockData value = blocks.get(id);
      Assert.assertEquals(Long.toString(value.getLocalID()), id);
    }
    // Key decoding: scan the table and collect every unprefixed key.
    List<String> scannedKeys = new ArrayList<>();
    for (Table.KeyValue<String, BlockData> entry : blocks.getRangeKVs(null,
        100, MetadataKeyFilters.getUnprefixedKeyFilter())) {
      scannedKeys.add(entry.getKey());
    }
    Assert.assertEquals(TestDB.BLOCK_IDS, scannedKeys);
    // The block iterator must visit the same blocks, in the same order.
    List<String> iteratedIds = new ArrayList<>();
    try (BlockIterator<BlockData> it = db.getStore().getBlockIterator()) {
      while (it.hasNext()) {
        iteratedIds.add(
            Long.toString(it.nextBlock().getBlockID().getLocalID()));
      }
    }
    Assert.assertEquals(TestDB.BLOCK_IDS, iteratedIds);
  }
}
Use of org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB in project ozone by apache.
From the class TestSchemaOneBackwardsCompatibility, method testReadWithoutMetadata.
/**
 * Tests reading of a container that was written in schema version 1, when
 * the container has no metadata keys present.
 * The {@link KeyValueContainerUtil} will scan the blocks in the database
 * to fill these metadata values into the database and into a
 * {@link KeyValueContainerData} object.
 * @throws Exception
 */
@Test
public void testReadWithoutMetadata() throws Exception {
  // This simulates them not being there to start with.
  try (ReferenceCountedDB db = BlockUtils.getDB(newKvData(), conf)) {
    Table<String, Long> metadata = db.getStore().getMetadataTable();
    // Remove each metadata key and confirm it is really gone.
    for (String key : new String[] {OzoneConsts.BLOCK_COUNT,
        OzoneConsts.CONTAINER_BYTES_USED,
        OzoneConsts.PENDING_DELETE_BLOCK_COUNT}) {
      metadata.delete(key);
      assertNull(metadata.get(key));
    }
  }
  // Create a new container data object, and fill in its metadata by
  // counting blocks from the database, since the metadata keys in the
  // database are now gone.
  checkContainerData(newKvData());
}
Use of org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB in project ozone by apache.
From the class TestSchemaOneBackwardsCompatibility, method testReadDeletedBlocks.
/**
 * Verifies that deleted-block entries written under schema version 1 exist
 * in the deleted blocks table and that their keys decode to the expected
 * IDs in {@code TestDB.DELETED_BLOCK_IDS}.
 */
@Test
public void testReadDeletedBlocks() throws Exception {
  try (ReferenceCountedDB db = BlockUtils.getDB(newKvData(), conf)) {
    Table<String, ChunkInfoList> deleted =
        db.getStore().getDeletedBlocksTable();
    // Since chunk info for deleted blocks was not stored in schema
    // version 1, there is no value to retrieve here — only key existence
    // can be checked.
    for (String id : TestDB.DELETED_BLOCK_IDS) {
      Assert.assertTrue(deleted.isExist(id));
    }
    // Keys read back from the database must decode to the original IDs.
    List<String> keys = new ArrayList<>();
    for (Table.KeyValue<String, ChunkInfoList> entry
        : deleted.getRangeKVs(null, 100)) {
      keys.add(entry.getKey());
    }
    Assert.assertEquals(TestDB.DELETED_BLOCK_IDS, keys);
  }
}
Aggregations