Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.
In class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV1:
private void markBlocksForDeletionSchemaV1(KeyValueContainerData containerData, DeletedBlocksTransaction delTX) throws IOException {
long containerId = delTX.getContainerID();
if (!isTxnIdValid(containerId, containerData, delTX)) {
return;
}
int newDeletionBlocks = 0;
try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
Table<String, BlockData> blockDataTable = containerDB.getStore().getBlockDataTable();
Table<String, ChunkInfoList> deletedBlocksTable = containerDB.getStore().getDeletedBlocksTable();
try (BatchOperation batch = containerDB.getStore().getBatchHandler().initBatchOperation()) {
for (Long blkLong : delTX.getLocalIDList()) {
String blk = blkLong.toString();
BlockData blkInfo = blockDataTable.get(blk);
if (blkInfo != null) {
String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + blk;
if (blockDataTable.get(deletingKey) != null || deletedBlocksTable.get(blk) != null) {
if (LOG.isDebugEnabled()) {
LOG.debug(String.format("Ignoring delete for block %s in container %d." + " Entry already added.", blk, containerId));
}
continue;
}
// Found the block in container db,
// use an atomic update to change its state to deleting.
blockDataTable.putWithBatch(batch, deletingKey, blkInfo);
blockDataTable.deleteWithBatch(batch, blk);
newDeletionBlocks++;
if (LOG.isDebugEnabled()) {
LOG.debug("Transited Block {} to DELETING state in container {}", blk, containerId);
}
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Block {} not found or already under deletion in" + " container {}, skip deleting it.", blk, containerId);
}
}
}
updateMetaData(containerData, delTX, newDeletionBlocks, containerDB, batch);
containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
} catch (IOException e) {
// If some blocks failed to delete, fail this transaction without
// acking it to SCM; SCM will then resend the transaction
// with a certain number of retries.
throw new IOException("Failed to delete blocks for TXID = " + delTX.getTxID(), e);
}
}
}
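For context on what the batch above buys: the handler never rewrites the block data, it only re-inserts the entry under a key carrying the deleting prefix and removes the original key in one atomic batch. The following is a minimal, self-contained sketch of that prefix-rename pattern written directly against RocksDB, which typically backs schema V1 container databases; the class name, the markDeleting helper, and the "#deleting#" literal are illustrative assumptions rather than Ozone code.
import java.nio.charset.StandardCharsets;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;
import org.rocksdb.WriteBatch;
import org.rocksdb.WriteOptions;
// Illustrative sketch only; not part of Ozone.
public class DeletingPrefixSketch {
  // Assumed literal; Ozone reads the prefix from OzoneConsts.DELETING_KEY_PREFIX.
  private static final String DELETING_PREFIX = "#deleting#";
  // Re-insert the block entry under a prefixed key and drop the original
  // key inside a single WriteBatch, so the state transition is atomic.
  public static void markDeleting(RocksDB db, String blockId)
      throws RocksDBException {
    byte[] key = blockId.getBytes(StandardCharsets.UTF_8);
    byte[] value = db.get(key);
    if (value == null) {
      return; // block missing or already transitioned
    }
    try (WriteBatch batch = new WriteBatch();
        WriteOptions opts = new WriteOptions()) {
      batch.put((DELETING_PREFIX + blockId).getBytes(StandardCharsets.UTF_8),
          value);
      batch.delete(key);
      db.write(opts, batch); // both operations applied atomically
    }
  }
}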
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.
In class TestSchemaOneBackwardsCompatibility, method testReadDeletedBlocks:
@Test
public void testReadDeletedBlocks() throws Exception {
try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
Table<String, ChunkInfoList> deletedBlocksTable = refCountedDB.getStore().getDeletedBlocksTable();
for (String blockID : TestDB.DELETED_BLOCK_IDS) {
// Since chunk info for deleted blocks was not stored in schema
// version 1, there is no value to retrieve here.
Assert.assertTrue(deletedBlocksTable.isExist(blockID));
}
// Test decoding keys from the database.
List<? extends Table.KeyValue<String, ChunkInfoList>> chunkInfoKeyValues = deletedBlocksTable.getRangeKVs(null, 100);
List<String> decodedKeys = new ArrayList<>();
for (Table.KeyValue<String, ChunkInfoList> kv : chunkInfoKeyValues) {
decodedKeys.add(kv.getKey());
}
Assert.assertEquals(TestDB.DELETED_BLOCK_IDS, decodedKeys);
}
}
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.
In class TestSchemaOneBackwardsCompatibility, method testBlockIteration:
/**
* Counts the number of deleted, pending delete, and regular blocks in the
* database, and checks that they match the expected values.
* Also makes sure that internal prefixes used to manage data in the schema
* one deleted blocks table are removed from keys in iterator results.
* @throws IOException
*/
@Test
public void testBlockIteration() throws IOException {
try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB));
assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS, countDeletingBlocks(refCountedDB));
assertEquals(TestDB.KEY_COUNT - TestDB.NUM_PENDING_DELETION_BLOCKS, countUnprefixedBlocks(refCountedDB));
// Test that deleted block keys do not have a visible prefix when
// iterating.
final String prefix = SchemaOneDeletedBlocksTable.DELETED_KEY_PREFIX;
Table<String, ChunkInfoList> deletedBlocksTable = refCountedDB.getStore().getDeletedBlocksTable();
// Test rangeKVs.
List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks = deletedBlocksTable.getRangeKVs(null, 100);
for (Table.KeyValue<String, ChunkInfoList> kv : deletedBlocks) {
assertFalse(kv.getKey().contains(prefix));
}
// Test sequentialRangeKVs.
deletedBlocks = deletedBlocksTable.getSequentialRangeKVs(null, 100);
for (Table.KeyValue<String, ChunkInfoList> kv : deletedBlocks) {
assertFalse(kv.getKey().contains(prefix));
}
}
}
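The prefix behaviour this test verifies can be pictured as a small key codec: entries are written under an internal deleted-key prefix but handed back by iterators without it. The sketch below illustrates that mapping under stated assumptions; DeletedKeyCodec and the "#deleted#" literal are hypothetical, and the real logic lives in SchemaOneDeletedBlocksTable.
// Hypothetical illustration of the prefix mapping; not Ozone code.
final class DeletedKeyCodec {
  // Assumed literal; the real value is SchemaOneDeletedBlocksTable.DELETED_KEY_PREFIX.
  private static final String DELETED_KEY_PREFIX = "#deleted#";
  // Key as physically stored in the schema V1 deleted blocks table.
  static String toDbKey(String blockId) {
    return DELETED_KEY_PREFIX + blockId;
  }
  // Key as exposed by table iterators: the internal prefix is stripped,
  // which is what the assertFalse(kv.getKey().contains(prefix)) checks
  // above rely on.
  static String fromDbKey(String dbKey) {
    return dbKey.startsWith(DELETED_KEY_PREFIX)
        ? dbKey.substring(DELETED_KEY_PREFIX.length())
        : dbKey;
  }
  private DeletedKeyCodec() { }
}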
Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.
In class TestSchemaOneBackwardsCompatibility, method testReadDeletedBlockChunkInfo:
/**
* Tests reading the chunk info saved from a block that was deleted from a
* database in schema version one. Blocks deleted from schema version one
* before the upgrade will have the block ID saved as their value. Trying
* to retrieve this value as a {@link ChunkInfoList} should fail. Blocks
* deleted from schema version one after the upgrade should have their
* {@link ChunkInfoList} saved as the corresponding value in the deleted
* blocks table. Reading these values should succeed.
* @throws Exception
*/
@Test
public void testReadDeletedBlockChunkInfo() throws Exception {
String datanodeUuid = UUID.randomUUID().toString();
ContainerSet containerSet = makeContainerSet();
VolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
ContainerMetrics metrics = ContainerMetrics.create(conf);
KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> { });
try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
// Read blocks that were already deleted before the upgrade.
List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks = refCountedDB.getStore().getDeletedBlocksTable().getRangeKVs(null, 100);
Set<String> preUpgradeBlocks = new HashSet<>();
for (Table.KeyValue<String, ChunkInfoList> chunkListKV : deletedBlocks) {
preUpgradeBlocks.add(chunkListKV.getKey());
try {
chunkListKV.getValue();
Assert.fail("No exception thrown when trying to retrieve old " + "deleted blocks values as chunk lists.");
} catch (IOException ex) {
// Exception thrown as expected.
}
}
Assert.assertEquals(TestDB.NUM_DELETED_BLOCKS, preUpgradeBlocks.size());
long initialTotalSpace = newKvData().getBytesUsed();
long blockSpace = initialTotalSpace / TestDB.KEY_COUNT;
runBlockDeletingService(keyValueHandler);
GenericTestUtils.waitFor(() -> {
try {
return (newKvData().getBytesUsed() != initialTotalSpace);
} catch (IOException ex) {
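// Ignore read failures while polling; waitFor keeps retrying until the timeout.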
}
return false;
}, 100, 3000);
long currentTotalSpace = newKvData().getBytesUsed();
// After the block deleting service runs, get the number of
// deleted blocks.
long numberOfBlocksDeleted = (initialTotalSpace - currentTotalSpace) / blockSpace;
// The blocks that were originally marked for deletion should now be
// deleted.
Assert.assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS, numberOfBlocksDeleted);
}
}
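Because pre-upgrade entries cannot be decoded as a ChunkInfoList, any code that reads the deleted blocks table directly has to tolerate that failure, which is exactly what the try/catch loop above asserts. Below is a small hypothetical helper built only on the Table API already shown in these examples; readChunkListOrNull is not an Ozone method, just a sketch of how such a read could be guarded.
import java.io.IOException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;
// Hypothetical helper, not part of Ozone.
public final class DeletedBlockReader {
  // Returns the stored chunk list for a deleted block, or null when the
  // value predates the upgrade (schema V1 stored the block ID itself as
  // the value, so it cannot be decoded as a ChunkInfoList).
  public static ChunkInfoList readChunkListOrNull(
      Table<String, ChunkInfoList> deletedBlocksTable, String blockId) {
    try {
      return deletedBlocksTable.get(blockId);
    } catch (IOException decodeFailure) {
      return null;
    }
  }
  private DeletedBlockReader() { }
}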