Use of org.apache.hadoop.ozone.container.common.interfaces.BlockIterator in project ozone by apache.
From the class TestSchemaOneBackwardsCompatibility, method testReadDeletingBlockData. The test verifies that blocks marked for deletion in a schema-one container database can be read three ways: by direct lookup with the deleting key prefix, by a ranged key scan with the deleting-key filter, and through a BlockIterator created with the same filter.
@Test
public void testReadDeletingBlockData() throws Exception {
  try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
    Table<String, BlockData> blockDataTable =
        refCountedDB.getStore().getBlockDataTable();

    // Read each deleting block directly by its prefixed key and verify
    // the stored local ID matches.
    for (String blockID : TestDB.DELETING_BLOCK_IDS) {
      BlockData blockData =
          blockDataTable.get(OzoneConsts.DELETING_KEY_PREFIX + blockID);
      Assert.assertEquals(Long.toString(blockData.getLocalID()), blockID);
    }

    // Test decoding keys from the database.
    List<? extends Table.KeyValue<String, BlockData>> blockKeyValues =
        blockDataTable.getRangeKVs(null, 100,
            MetadataKeyFilters.getDeletingKeyFilter());
    List<String> decodedKeys = new ArrayList<>();
    for (Table.KeyValue<String, BlockData> blockDataKV : blockKeyValues) {
      decodedKeys.add(blockDataKV.getKey());
    }

    // Apply the deleting prefix to the saved block IDs so we can compare
    // them to the retrieved keys.
    List<String> expectedKeys = TestDB.DELETING_BLOCK_IDS.stream()
        .map(key -> OzoneConsts.DELETING_KEY_PREFIX + key)
        .collect(Collectors.toList());
    Assert.assertEquals(expectedKeys, decodedKeys);

    // Test reading deleting blocks with the block iterator.
    MetadataKeyFilters.KeyPrefixFilter filter =
        MetadataKeyFilters.getDeletingKeyFilter();
    try (BlockIterator<BlockData> iter =
        refCountedDB.getStore().getBlockIterator(filter)) {
      List<String> iteratorBlockIDs = new ArrayList<>();
      while (iter.hasNext()) {
        long localID = iter.nextBlock().getBlockID().getLocalID();
        iteratorBlockIDs.add(Long.toString(localID));
      }
      Assert.assertEquals(TestDB.DELETING_BLOCK_IDS, iteratorBlockIDs);
    }
  }
}
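Outside a test, the same iterator pattern can be lifted into a small helper. The sketch below is illustrative only: the class name, method name, and the KeyValueContainerData and ConfigurationSource parameter types are assumptions not confirmed by the snippet above, and the import paths may differ between Ozone versions. The ReferenceCountedDB, MetadataKeyFilters, and BlockIterator calls mirror the test exactly.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.interfaces.BlockIterator;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;

// Hypothetical helper class; not part of the Ozone codebase.
public final class DeletingBlockScanner {

  private DeletingBlockScanner() { }

  /**
   * Collects the local IDs of all blocks marked for deletion in the
   * given container, using the same deleting-key filter and iterator
   * calls as the test above.
   */
  public static List<Long> listDeletingBlockIds(
      KeyValueContainerData containerData, ConfigurationSource conf)
      throws IOException {
    List<Long> localIds = new ArrayList<>();
    // getDB returns a reference-counted handle; try-with-resources
    // releases the reference once iteration is done.
    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
      MetadataKeyFilters.KeyPrefixFilter filter =
          MetadataKeyFilters.getDeletingKeyFilter();
      try (BlockIterator<BlockData> iter =
          db.getStore().getBlockIterator(filter)) {
        while (iter.hasNext()) {
          localIds.add(iter.nextBlock().getBlockID().getLocalID());
        }
      }
    }
    return localIds;
  }
}

As in the test, both the DB handle and the iterator are opened in try-with-resources: closing a ReferenceCountedDB decrements its reference count rather than tearing down the shared store, so the helper can safely run concurrently with other readers of the same container DB.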