
Example 1 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

From the class DeleteBlocksCommandHandler, method markBlocksForDeletionSchemaV1:

private void markBlocksForDeletionSchemaV1(KeyValueContainerData containerData, DeletedBlocksTransaction delTX) throws IOException {
    long containerId = delTX.getContainerID();
    if (!isTxnIdValid(containerId, containerData, delTX)) {
        return;
    }
    int newDeletionBlocks = 0;
    try (ReferenceCountedDB containerDB = BlockUtils.getDB(containerData, conf)) {
        Table<String, BlockData> blockDataTable = containerDB.getStore().getBlockDataTable();
        Table<String, ChunkInfoList> deletedBlocksTable = containerDB.getStore().getDeletedBlocksTable();
        try (BatchOperation batch = containerDB.getStore().getBatchHandler().initBatchOperation()) {
            for (Long blkLong : delTX.getLocalIDList()) {
                String blk = blkLong.toString();
                BlockData blkInfo = blockDataTable.get(blk);
                if (blkInfo != null) {
                    String deletingKey = OzoneConsts.DELETING_KEY_PREFIX + blk;
                    if (blockDataTable.get(deletingKey) != null || deletedBlocksTable.get(blk) != null) {
                        if (LOG.isDebugEnabled()) {
                            LOG.debug(String.format("Ignoring delete for block %s in container %d." + " Entry already added.", blk, containerId));
                        }
                        continue;
                    }
                    // Found the block in container db,
                    // use an atomic update to change its state to deleting.
                    blockDataTable.putWithBatch(batch, deletingKey, blkInfo);
                    blockDataTable.deleteWithBatch(batch, blk);
                    newDeletionBlocks++;
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Transited Block {} to DELETING state in container {}", blk, containerId);
                    }
                } else {
                    if (LOG.isDebugEnabled()) {
                        LOG.debug("Block {} not found or already under deletion in" + " container {}, skip deleting it.", blk, containerId);
                    }
                }
            }
            updateMetaData(containerData, delTX, newDeletionBlocks, containerDB, batch);
            containerDB.getStore().getBatchHandler().commitBatchOperation(batch);
        } catch (IOException e) {
            // If the batch fails, rethrow without acknowledging to SCM, so
            // that SCM will resend this transaction
            // with a certain number of retries.
            throw new IOException("Failed to delete blocks for TXID = " + delTX.getTxID(), e);
        }
    }
}
Also used: BatchOperation (org.apache.hadoop.hdds.utils.db.BatchOperation), IOException (java.io.IOException), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB), ChunkInfoList (org.apache.hadoop.ozone.container.common.helpers.ChunkInfoList)
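
Schema V1 marks a block for deletion by rewriting its row under the "#deleting#" key prefix, as the batch above shows. Below is a minimal hedged sketch of a helper that checks for that state, reusing only calls shown in these examples; the helper name isMarkedForDeletion and its parameters are illustrative, and the BlockUtils/OzoneConsts import paths are assumed from the Ozone source layout.

import java.io.IOException;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;

// Illustrative helper, not Ozone API: reports whether a block in a schema V1
// container has been moved under the "#deleting#" prefix by the handler above.
static boolean isMarkedForDeletion(KeyValueContainerData containerData,
    OzoneConfiguration conf, long localId) throws IOException {
    try (ReferenceCountedDB db = BlockUtils.getDB(containerData, conf)) {
        String blk = Long.toString(localId);
        // A live block is keyed by its bare local ID...
        BlockData live = db.getStore().getBlockDataTable().get(blk);
        // ...a marked block by the same ID behind the deleting prefix.
        BlockData marked = db.getStore().getBlockDataTable()
            .get(OzoneConsts.DELETING_KEY_PREFIX + blk);
        return live == null && marked != null;
    }
}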

Example 2 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

From the class ContainerTestHelper, method newPutBlockRequestBuilder:

public static Builder newPutBlockRequestBuilder(Pipeline pipeline, ContainerProtos.WriteChunkRequestProtoOrBuilder writeRequest) throws IOException {
    LOG.trace("putBlock: {} to pipeline={}", writeRequest.getBlockID(), pipeline);
    ContainerProtos.PutBlockRequestProto.Builder putRequest = ContainerProtos.PutBlockRequestProto.newBuilder();
    BlockData blockData = new BlockData(BlockID.getFromProtobuf(writeRequest.getBlockID()));
    List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
    newList.add(writeRequest.getChunkData());
    blockData.setChunks(newList);
    blockData.setBlockCommitSequenceId(0);
    putRequest.setBlockData(blockData.getProtoBufMessage());
    Builder request = ContainerCommandRequestProto.newBuilder();
    request.setCmdType(ContainerProtos.Type.PutBlock);
    request.setContainerID(blockData.getContainerID());
    request.setPutBlock(putRequest);
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    return request;
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), Builder (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), LinkedList (java.util.LinkedList)
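
The builder above exercises the usual BlockData lifecycle in tests: wrap a BlockID, attach chunk metadata, and serialize with getProtoBufMessage(). A minimal hedged round-trip sketch follows; the IDs are arbitrary and blockDataRoundTrip is an illustrative method name, not Ozone API.

import java.io.IOException;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;

// Illustrative round trip; container ID 1, local ID 1 are arbitrary values.
static void blockDataRoundTrip() throws IOException {
    BlockData original = new BlockData(new BlockID(1L, 1L));
    original.setBlockCommitSequenceId(0);
    // Serialize to the wire form that a PutBlock request carries...
    ContainerProtos.BlockData proto = original.getProtoBufMessage();
    // ...and parse it back, as the datanode side does.
    BlockData parsed = BlockData.getFromProtoBuf(proto);
    assert parsed.getContainerID() == original.getContainerID();
    assert parsed.getLocalID() == original.getLocalID();
}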

Example 3 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

From the class BlockManagerImpl, method getBlockByID:

private BlockData getBlockByID(ReferenceCountedDB db, BlockID blockID) throws IOException {
    String blockKey = Long.toString(blockID.getLocalID());
    BlockData blockData = db.getStore().getBlockDataTable().get(blockKey);
    if (blockData == null) {
        throw new StorageContainerException(NO_SUCH_BLOCK_ERR_MSG, NO_SUCH_BLOCK);
    }
    return blockData;
}
Also used: StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData)
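
Callers can tell "missing block" apart from other I/O failures by the result code carried inside StorageContainerException. A hedged sketch of that pattern, assuming it sits alongside getBlockByID in BlockManagerImpl; lookupOrNull is an illustrative wrapper, not Ozone API.

import java.io.IOException;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;

// Hypothetical wrapper: convert the NO_SUCH_BLOCK failure back into a null
// instead of an exception, leaving other IOExceptions to propagate.
private BlockData lookupOrNull(ReferenceCountedDB db, BlockID blockID)
    throws IOException {
    try {
        return getBlockByID(db, blockID);
    } catch (StorageContainerException e) {
        if (e.getResult() == Result.NO_SUCH_BLOCK) {
            return null; // block genuinely absent from this container
        }
        throw e; // any other result code is a real error
    }
}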

Example 4 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

From the class TestStorageContainerManagerHelper, method getPendingDeletionBlocks:

public List<String> getPendingDeletionBlocks(Long containerID) throws IOException {
    List<String> pendingDeletionBlocks = Lists.newArrayList();
    KeyPrefixFilter filter = new KeyPrefixFilter().addFilter(OzoneConsts.DELETING_KEY_PREFIX);
    // try-with-resources guarantees the container DB handle is released even
    // if the range scan throws.
    try (ReferenceCountedDB meta = getContainerMetadata(containerID)) {
        List<? extends Table.KeyValue<String, BlockData>> kvs = meta.getStore().getBlockDataTable().getRangeKVs(null, Integer.MAX_VALUE, filter);
        for (Table.KeyValue<String, BlockData> entry : kvs) {
            // Strip the deleting prefix to recover the bare local block ID.
            pendingDeletionBlocks.add(entry.getKey().replace(OzoneConsts.DELETING_KEY_PREFIX, ""));
        }
    }
    return pendingDeletionBlocks;
}
Also used: Table (org.apache.hadoop.hdds.utils.db.Table), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), KeyPrefixFilter (org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter), ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
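
Examples 1 and 4 are two ends of the same convention: the handler writes "#deleting#<localId>" rows, and this helper strips the prefix back off. A hedged usage sketch, where helper is a TestStorageContainerManagerHelper instance and the container and block IDs are illustrative:

// After a DeletedBlocksTransaction for local blocks 1 and 2 has been handled
// (Example 1), the pending list should surface their bare local IDs.
List<String> pending = helper.getPendingDeletionBlocks(containerID);
Assert.assertTrue(pending.contains("1"));
Assert.assertTrue(pending.contains("2"));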

Example 5 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

From the class TestFailureHandlingByClient, method testBlockCountOnFailures:

/**
 * Test whether blockData and Container metadata (block count and used
 * bytes) are updated correctly when there is a write failure.
 * We can combine this test with {@link #testBlockWritesWithDnFailures()}
 * as that test also simulates a write failure and client writes failed
 * chunk writes to a new block.
 */
private void testBlockCountOnFailures(OmKeyInfo omKeyInfo) throws Exception {
    // testBlockWritesWithDnFailures writes chunkSize*2.5 size of data into
    // KeyOutputStream. But before closing the outputStream, 2 of the DNs in
    // the pipeline being written to are closed. So the key will be written
    // to 2 blocks as at least the last 0.5 chunk would not be committed to the
    // first block before the stream is closed.
    /**
     * There are 3 possible scenarios:
     * 1. Block1 has 2 chunks and OMKeyInfo also has 2 chunks against this block
     *    => Block2 should have 1 chunk
     *    (2 chunks were written to Block1, committed and acknowledged by
     *    CommitWatcher)
     * 2. Block1 has 1 chunk and OMKeyInfo has 1 chunk against this block
     *    => Block2 should have 2 chunks
     *    (Possibly 2 chunks were written but only 1 was committed to the
     *    block)
     * 3. Block1 has 2 chunks but OMKeyInfo has only 1 chunk against this block
     *    => Block2 should have 2 chunks
     *    (This happens when the 2nd chunk has been committed to Block1 but
     *    not acknowledged by CommitWatcher before pipeline shutdown)
     */
    // Get information about the first and second block (in different pipelines)
    List<OmKeyLocationInfo> locationList = omKeyInfo.getLatestVersionLocations().getLocationList();
    long containerId1 = locationList.get(0).getContainerID();
    List<DatanodeDetails> block1DNs = locationList.get(0).getPipeline().getNodes();
    long containerId2 = locationList.get(1).getContainerID();
    List<DatanodeDetails> block2DNs = locationList.get(1).getPipeline().getNodes();
    int block2ExpectedChunkCount;
    if (locationList.get(0).getLength() == 2 * chunkSize) {
        // Scenario 1
        block2ExpectedChunkCount = 1;
    } else {
        // Scenario 2 or 3 (both leave 1.5 chunks for Block2)
        block2ExpectedChunkCount = 2;
    }
    // For the first block, first 2 DNs in the pipeline are shutdown (to
    // simulate a failure). It should have 1 or 2 chunks (depending on
    // whether the DN CommitWatcher successfully acknowledged the 2nd chunk
    // write or not). The 3rd chunk would not exist on the first pipeline as
    // the pipeline would be closed before the last 0.5 chunk was committed
    // to the block.
    KeyValueContainerData containerData1 = ((KeyValueContainer) cluster.getHddsDatanode(block1DNs.get(2)).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerId1)).getContainerData();
    try (ReferenceCountedDB containerDb1 = BlockUtils.getDB(containerData1, conf)) {
        BlockData blockData1 = containerDb1.getStore().getBlockDataTable().get(Long.toString(locationList.get(0).getBlockID().getLocalID()));
        // The first Block could have 1 or 2 chunkSize of data
        int block1NumChunks = blockData1.getChunks().size();
        Assert.assertTrue(block1NumChunks >= 1);
        Assert.assertEquals(chunkSize * block1NumChunks, blockData1.getSize());
        Assert.assertEquals(1, containerData1.getBlockCount());
        Assert.assertEquals(chunkSize * block1NumChunks, containerData1.getBytesUsed());
    }
    // Verify that the second block holds the remaining data
    // (0.5 or 1.5 chunks, per the scenarios above)
    KeyValueContainerData containerData2 = ((KeyValueContainer) cluster.getHddsDatanode(block2DNs.get(0)).getDatanodeStateMachine().getContainer().getContainerSet().getContainer(containerId2)).getContainerData();
    try (ReferenceCountedDB containerDb2 = BlockUtils.getDB(containerData2, conf)) {
        BlockData blockData2 = containerDb2.getStore().getBlockDataTable().get(Long.toString(locationList.get(1).getBlockID().getLocalID()));
        // The second Block holds 0.5 chunkSize (scenario 1) or
        // 1.5 chunkSize (scenarios 2/3) of data
        Assert.assertEquals(block2ExpectedChunkCount, blockData2.getChunks().size());
        Assert.assertEquals(1, containerData2.getBlockCount());
        int expectedBlockSize;
        if (block2ExpectedChunkCount == 1) {
            expectedBlockSize = chunkSize / 2;
        } else {
            expectedBlockSize = chunkSize + chunkSize / 2;
        }
        Assert.assertEquals(expectedBlockSize, blockData2.getSize());
        Assert.assertEquals(expectedBlockSize, containerData2.getBytesUsed());
    }
}
Also used: DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
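
The expectedBlockSize branch is plain remainder arithmetic over the 2.5-chunk write. A hedged sketch of the bookkeeping, with no Ozone API involved; chunkSize is the test's chunk size.

// The test writes 2.5 chunks in total; the second block always holds
// whatever the first block did not commit.
long total = 2L * chunkSize + chunkSize / 2;     // 2.5 * chunkSize
long block1Committed = 2L * chunkSize;           // scenario 1; 1 chunk in 2/3
long block2Expected = total - block1Committed;   // chunkSize / 2 here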

Aggregations

BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 47 usages
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 22
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 22
BlockID (org.apache.hadoop.hdds.client.BlockID): 21
Test (org.junit.Test): 18
ArrayList (java.util.ArrayList): 15
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 12
IOException (java.io.IOException): 11
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 11
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 8
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8
LinkedList (java.util.LinkedList): 7
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 7
File (java.io.File): 6
Table (org.apache.hadoop.hdds.utils.db.Table): 5
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 5
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 5
List (java.util.List): 4
ByteBuffer (java.nio.ByteBuffer): 3
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3