Example 1 with ChunkInfoList

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.

From the class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV1:

private void markBlocksForDeletionSchemaV1(KeyValueContainerData containerData, DeletedBlocksTransaction delTX) throws IOException {
    long containerId = delTX.getContainerID();
    if (!isTxnIdValid(containerId, containerData, delTX)) {
        return;
    }
    int newDeletionBlocks = 0;
    try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
        Table<String, BlockData> blockDataTable = containerDB.getStore().getBlockDataTable();
        Table<String, ChunkInfoList> deletedBlocksTable = containerDB.getStore().getDeletedBlocksTable();
        try (BatchOperation batch = containerDB.getStore().getBatchHandler().initBatchOperation()) {
            for (Long blkLong : delTX.getLocalIDList()) {
                String blk = blkLong.toString();
                BlockData blkInfo = blockDataTable.get(blk);
                if (blkInfo != null) {
                    String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + blk;
                    if (blockDataTable.get(deletingKey) != null || deletedBlocksTable.get(blk) != null) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(String.format("Ignoring delete for block %s in container %d." + " Entry already added.", blk, containerId));
                        }
                        continue;
                    }
                    // Found the block in container db,
                    // use an atomic update to change its state to deleting.
                    blockDataTable.putWithBatch(batch, deletingKey, blkInfo);
                    blockDataTable.deleteWithBatch(batch, blk);
                    newDeletionBlocks++;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Transited Block {} to DELETING state in container {}", blk, containerId);
                    }
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Block {} not found or already under deletion in" + " container {}, skip deleting it.", blk, containerId);
                    }
                }
            }
            updateMetaData(containerData, delTX, newDeletionBlocks, containerDB, batch);
            containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
        } catch (IOException e) {
            // If the batch commit fails, leave the DB unchanged; SCM will
            // resend this deletion transaction with a certain number of
            // retries.
            throw new IOException("Failed to delete blocks for TXID = " + delTX.getTxID(), e);
        }
    }
}
Also used : BatchOperation(org.apache.hadoop.hdds.utils.db.BatchOperation) IOException(java.io.IOException) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList)
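
The heart of the schema V1 path above is the re-key inside a single batch: the block's metadata is rewritten under a deleting-prefixed key and the original key is deleted, so the state transition is atomic. Below is a minimal sketch of that pattern, assuming the Table and BatchOperation APIs shown in the example; the prefix literal "#deleting#" is an assumption standing in for OzoneConsts.DELETING_KEY_PREFIX.

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.BatchOperation;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;

final class DeletingRekeySketch {
    // Assumed value of OzoneConsts.DELETING_KEY_PREFIX.
    private static final String DELETING_KEY_PREFIX = "#deleting#";

    // Re-keys one block under the deleting prefix inside the caller's batch.
    // Both mutations commit together, so readers see either the old key or
    // the prefixed key, never both and never neither.
    static void markDeleting(Table<String, BlockData> blockDataTable,
            BatchOperation batch, String blk, BlockData blkInfo)
            throws IOException {
        blockDataTable.putWithBatch(batch, DELETING_KEY_PREFIX + blk, blkInfo);
        blockDataTable.deleteWithBatch(batch, blk);
    }
}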

Example 2 with ChunkInfoList

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.

From the class TestSchemaOneBackwardsCompatibility, method testReadDeletedBlocks:

@Test
public void testReadDeletedBlocks() throws Exception {
    try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
        Table<String, ChunkInfoList> deletedBlocksTable = refCountedDB.getStore().getDeletedBlocksTable();
        for (String blockID : TestDB.DELETED_BLOCK_IDS) {
            // Since chunk info for deleted blocks was not stored in schema
            // version 1, there is no value to retrieve here.
            Assert.assertTrue(deletedBlocksTable.isExist(blockID));
        }
        // Test decoding keys from the database.
        List<? extends Table.KeyValue<String, ChunkInfoList>> chunkInfoKeyValues = deletedBlocksTable.getRangeKVs(null, 100);
        List<String> decodedKeys = new ArrayList<>();
        for (Table.KeyValue<String, ChunkInfoList> kv : chunkInfoKeyValues) {
            decodedKeys.add(kv.getKey());
        }
        Assert.assertEquals(TestDB.DELETED_BLOCK_IDS, decodedKeys);
    }
}
Also used : SchemaOneDeletedBlocksTable(org.apache.hadoop.ozone.container.metadata.SchemaOneDeletedBlocksTable) Table(org.apache.hadoop.hdds.utils.db.Table) ArrayList(java.util.ArrayList) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList) Test(org.junit.Test)
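
One detail worth calling out: the test asserts with isExist rather than get. For pre-upgrade entries the stored value is a raw block ID, so decoding it as a ChunkInfoList would fail; isExist only checks key presence and (as far as the Table API shown here goes) should not need to deserialize the value. A hedged sketch of that distinction:

import java.io.IOException;

import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList;

final class DeletedBlockCheckSketch {
    // Presence check that tolerates undecodable values. get(blockID) would
    // attempt to decode the value as a ChunkInfoList and throw for entries
    // written before the upgrade; isExist(blockID) does not.
    static boolean isDeleted(Table<String, ChunkInfoList> deletedBlocksTable,
            String blockID) throws IOException {
        return deletedBlocksTable.isExist(blockID);
    }
}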

Example 3 with ChunkInfoList

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.

From the class TestSchemaOneBackwardsCompatibility, method testBlockIteration:

/**
 * Counts the number of deleted, pending delete, and regular blocks in the
 * database, and checks that they match the expected values.
 * Also makes sure that internal prefixes used to manage data in the schema
 * one deleted blocks table are removed from keys in iterator results.
 * @throws IOException
 */
@Test
public void testBlockIteration() throws IOException {
    try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
        assertEquals(TestDB.NUM_DELETED_BLOCKS, countDeletedBlocks(refCountedDB));
        assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS, countDeletingBlocks(refCountedDB));
        assertEquals(TestDB.KEY_COUNT - TestDB.NUM_PENDING_DELETION_BLOCKS, countUnprefixedBlocks(refCountedDB));
        // Test that deleted block keys do not have a visible prefix when
        // iterating.
        final String prefix = SchemaOneDeletedBlocksTable.DELETED_KEY_PREFIX;
        Table<String, ChunkInfoList> deletedBlocksTable = refCountedDB.getStore().getDeletedBlocksTable();
        // Test rangeKVs.
        List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks = deletedBlocksTable.getRangeKVs(null, 100);
        for (Table.KeyValue<String, ChunkInfoList> kv : deletedBlocks) {
            assertFalse(kv.getKey().contains(prefix));
        }
        // Test sequentialRangeKVs.
        deletedBlocks = deletedBlocksTable.getSequentialRangeKVs(null, 100);
        for (Table.KeyValue<String, ChunkInfoList> kv : deletedBlocks) {
            assertFalse(kv.getKey().contains(prefix));
        }
    }
}
Also used : SchemaOneDeletedBlocksTable(org.apache.hadoop.ozone.container.metadata.SchemaOneDeletedBlocksTable) Table(org.apache.hadoop.hdds.utils.db.Table) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList) Test(org.junit.Test)
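
The prefix assertions above work because SchemaOneDeletedBlocksTable translates keys at the table boundary, stripping the internal prefix before results reach the caller. Below is a simplified sketch of that idea; the literal "#deleted#" and both helper names are illustrative assumptions, not the class's actual API.

final class DeletedPrefixSketch {
    // Assumed value of SchemaOneDeletedBlocksTable.DELETED_KEY_PREFIX.
    static final String DELETED_KEY_PREFIX = "#deleted#";

    // On-disk key -> key visible to iterators such as getRangeKVs.
    static String stripPrefix(String onDiskKey) {
        return onDiskKey.startsWith(DELETED_KEY_PREFIX)
                ? onDiskKey.substring(DELETED_KEY_PREFIX.length())
                : onDiskKey;
    }

    // Caller-supplied key -> on-disk key, applied on reads and writes.
    static String addPrefix(String key) {
        return DELETED_KEY_PREFIX + key;
    }
}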

Example 4 with ChunkInfoList

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList in project ozone by apache.

From the class TestSchemaOneBackwardsCompatibility, method testReadDeletedBlockChunkInfo:

/**
 * Tests reading the chunk info saved from a block that was deleted from a
 * database in schema version one. Blocks deleted from schema version one
 * before the upgrade will have the block ID saved as their value. Trying
 * to retrieve this value as a {@link ChunkInfoList} should fail. Blocks
 * deleted from schema version one after the upgrade should have their
 * {@link ChunkInfoList} saved as the corresponding value in the deleted
 * blocks table. Reading these values should succeed.
 * @throws Exception
 */
@Test
public void testReadDeletedBlockChunkInfo() throws Exception {
    String datanodeUuid = UUID.randomUUID().toString();
    ContainerSet containerSet = makeContainerSet();
    VolumeSet volumeSet = new MutableVolumeSet(datanodeUuid, conf, null, StorageVolume.VolumeType.DATA_VOLUME, null);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    KeyValueHandler keyValueHandler = new KeyValueHandler(conf, datanodeUuid, containerSet, volumeSet, metrics, c -> {
    });
    try (ReferenceCountedDB refCountedDB = BlockUtils.getDB(newKvData(), conf)) {
        // Read blocks that were already deleted before the upgrade.
        List<? extends Table.KeyValue<String, ChunkInfoList>> deletedBlocks = refCountedDB.getStore().getDeletedBlocksTable().getRangeKVs(null, 100);
        Set<String> preUpgradeBlocks = new HashSet<>();
        for (Table.KeyValue<String, ChunkInfoList> chunkListKV : deletedBlocks) {
            preUpgradeBlocks.add(chunkListKV.getKey());
            try {
                chunkListKV.getValue();
                Assert.fail("No exception thrown when trying to retrieve old " + "deleted blocks values as chunk lists.");
            } catch (IOException ex) {
            // Exception thrown as expected.
            }
        }
        Assert.assertEquals(TestDB.NUM_DELETED_BLOCKS, preUpgradeBlocks.size());
        long initialTotalSpace = newKvData().getBytesUsed();
        long blockSpace = initialTotalSpace / TestDB.KEY_COUNT;
        runBlockDeletingService(keyValueHandler);
        GenericTestUtils.waitFor(() -> {
            try {
                return (newKvData().getBytesUsed() != initialTotalSpace);
            } catch (IOException ex) {
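            // Ignore the read failure; waitFor polls again until timeout.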
            }
            return false;
        }, 100, 3000);
        long currentTotalSpace = newKvData().getBytesUsed();
        // After the block deleting service runs, get the number of
        // deleted blocks.
        long numberOfBlocksDeleted = (initialTotalSpace - currentTotalSpace) / blockSpace;
        // The blocks that were originally marked for deletion should now be
        // deleted.
        Assert.assertEquals(TestDB.NUM_PENDING_DELETION_BLOCKS, numberOfBlocksDeleted);
    }
}
Also used : SchemaOneDeletedBlocksTable(org.apache.hadoop.ozone.container.metadata.SchemaOneDeletedBlocksTable) Table(org.apache.hadoop.hdds.utils.db.Table) IOException(java.io.IOException) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) ChunkInfoList(org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList) KeyValueHandler(org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler) ContainerSet(org.apache.hadoop.ozone.container.common.impl.ContainerSet) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) ContainerMetrics(org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) MutableVolumeSet(org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) VolumeSet(org.apache.hadoop.ozone.container.common.volume.VolumeSet) HashSet(java.util.HashSet) Test(org.junit.Test)
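
The final assertion infers the deletion count from space accounting rather than a DB read: each block is assumed to occupy an equal share of the container's bytes, so the freed space divided by the per-block share gives the number of blocks removed. A back-of-envelope illustration with made-up numbers (none of these values come from TestDB):

final class SpaceAccountingSketch {
    public static void main(String[] args) {
        long keyCount = 10;             // hypothetical stand-in for TestDB.KEY_COUNT
        long initialTotalSpace = 4000;  // bytes used before the service runs
        long blockSpace = initialTotalSpace / keyCount; // 400 bytes per block

        // Suppose the block deleting service frees exactly two blocks.
        long currentTotalSpace = initialTotalSpace - 2 * blockSpace; // 3200

        long numberOfBlocksDeleted =
                (initialTotalSpace - currentTotalSpace) / blockSpace;
        System.out.println(numberOfBlocksDeleted); // prints 2
    }
}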

Aggregations

ChunkInfoList (org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList) 4
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) 4
Table (org.apache.hadoop.hdds.utils.db.Table) 3
SchemaOneDeletedBlocksTable (org.apache.hadoop.ozone.container.metadata.SchemaOneDeletedBlocksTable) 3
Test (org.junit.Test) 3
IOException (java.io.IOException) 2
ArrayList (java.util.ArrayList) 1
HashSet (java.util.HashSet) 1
BatchOperation (org.apache.hadoop.hdds.utils.db.BatchOperation) 1
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData) 1
ContainerMetrics (org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics) 1
ContainerSet (org.apache.hadoop.ozone.container.common.impl.ContainerSet) 1
MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet) 1
VolumeSet (org.apache.hadoop.ozone.container.common.volume.VolumeSet) 1
KeyValueHandler (org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler) 1