Example 11 with BlockData

use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

the class TestOzoneContainer method addBlocks.

private long addBlocks(KeyValueContainer container, int blocks, int chunksPerBlock) throws Exception {
    String strBlock = "block";
    String strChunk = "-chunkFile";
    int datalen = 65536;
    long usedBytes = 0;
    long freeBytes = container.getContainerData().getMaxSize();
    long containerId = container.getContainerData().getContainerID();
    ReferenceCountedDB db = BlockUtils.getDB(container.getContainerData(), conf);
    Table<String, Long> metadataTable = db.getStore().getMetadataTable();
    Table<String, BlockData> blockDataTable = db.getStore().getBlockDataTable();
    for (int bi = 0; bi < blocks; bi++) {
        // Creating BlockData
        BlockID blockID = new BlockID(containerId, bi);
        BlockData blockData = new BlockData(blockID);
        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
        for (int ci = 0; ci < chunksPerBlock; ci++) {
            String chunkName = strBlock + bi + strChunk + ci;
            long offset = ci * (long) datalen;
            ChunkInfo info = new ChunkInfo(chunkName, offset, datalen);
            usedBytes += datalen;
            chunkList.add(info.getProtoBufMessage());
        }
        blockData.setChunks(chunkList);
        blockDataTable.put(Long.toString(blockID.getLocalID()), blockData);
    }
    // Set block count and used bytes.
    metadataTable.put(OzoneConsts.BLOCK_COUNT, (long) blocks);
    metadataTable.put(OzoneConsts.CONTAINER_BYTES_USED, usedBytes);
    db.close();
    // Return the remaining available capacity of the container.
    return (freeBytes - usedBytes);
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ArrayList(java.util.ArrayList) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
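A minimal sketch of calling this helper from a test; the block and chunk counts are arbitrary, and the KeyValueContainer is assumed to have been created earlier in the test:

// Write 10 blocks of 5 chunks each, then check the returned capacity.
long remaining = addBlocks(container, 10, 5);
// Each chunk is 64 KiB, so 10 blocks * 5 chunks * 65536 bytes were consumed.
long expectedUsed = 10L * 5 * 65536;
Assert.assertEquals(container.getContainerData().getMaxSize() - expectedUsed, remaining);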

Example 12 with BlockData

use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

the class TestKeyValueContainer method populate.

/**
 * Add some keys to the container.
 */
private void populate(long numberOfKeysToWrite) throws IOException {
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
        Table<String, BlockData> blockDataTable = metadataStore.getStore().getBlockDataTable();
        for (long i = 0; i < numberOfKeysToWrite; i++) {
            blockDataTable.put("test" + i, new BlockData(new BlockID(i, i)));
        }
        // In production, putting a block increments the block count and updates
        // it in the DB; this test does that bookkeeping manually by writing the
        // key count to the DB.
        metadataStore.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COUNT, numberOfKeysToWrite);
    }
    Map<String, String> metadata = new HashMap<>();
    metadata.put("key1", "value1");
    keyValueContainer.update(metadata, true);
}
Also used : HashMap(java.util.HashMap) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
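A hedged follow-up sketch: after populate runs, the persisted block count can be read back through the same metadata table API used above (the count of 100 is arbitrary):

populate(100);
try (ReferenceCountedDB store = BlockUtils.getDB(keyValueContainer.getContainerData(), CONF)) {
    // BLOCK_COUNT was written manually by populate, so it should read back as 100.
    Long blockCount = store.getStore().getMetadataTable().get(OzoneConsts.BLOCK_COUNT);
    Assert.assertEquals(Long.valueOf(100), blockCount);
}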

Example 13 with BlockData

use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

the class TestKeyValueContainerIntegrityChecks method createContainerWithBlocks.

/**
 * Creates a container with normal and deleted blocks.
 * Normal blocks are inserted first, followed by the deleted blocks.
 */
protected KeyValueContainer createContainerWithBlocks(long containerId, int normalBlocks, int deletedBlocks) throws Exception {
    String strBlock = "block";
    String strChunk = "-chunkFile";
    long totalBlocks = normalBlocks + deletedBlocks;
    int bytesPerChecksum = 2 * UNIT_LEN;
    Checksum checksum = new Checksum(ContainerProtos.ChecksumType.SHA256, bytesPerChecksum);
    byte[] chunkData = RandomStringUtils.randomAscii(CHUNK_LEN).getBytes(UTF_8);
    ChecksumData checksumData = checksum.computeChecksum(chunkData);
    DispatcherContext writeStage = new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build();
    DispatcherContext commitStage = new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build();
    KeyValueContainerData containerData = new KeyValueContainerData(containerId, containerLayoutTestInfo.getLayout(), (long) CHUNKS_PER_BLOCK * CHUNK_LEN * totalBlocks, UUID.randomUUID().toString(), UUID.randomUUID().toString());
    KeyValueContainer container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), UUID.randomUUID().toString());
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(containerData, conf)) {
        assertNotNull(containerData.getChunksPath());
        File chunksPath = new File(containerData.getChunksPath());
        containerLayoutTestInfo.validateFileCount(chunksPath, 0, 0);
        List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
        for (int i = 0; i < totalBlocks; i++) {
            BlockID blockID = new BlockID(containerId, i);
            BlockData blockData = new BlockData(blockID);
            chunkList.clear();
            for (long chunkCount = 0; chunkCount < CHUNKS_PER_BLOCK; chunkCount++) {
                String chunkName = strBlock + i + strChunk + chunkCount;
                long offset = chunkCount * CHUNK_LEN;
                ChunkInfo info = new ChunkInfo(chunkName, offset, CHUNK_LEN);
                info.setChecksumData(checksumData);
                chunkList.add(info.getProtoBufMessage());
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), writeStage);
                chunkManager.writeChunk(container, blockID, info, ByteBuffer.wrap(chunkData), commitStage);
            }
            blockData.setChunks(chunkList);
            // normal key
            String key = Long.toString(blockID.getLocalID());
            if (i >= normalBlocks) {
                // deleted key
                key = OzoneConsts.DELETING_KEY_PREFIX + blockID.getLocalID();
            }
            metadataStore.getStore().getBlockDataTable().put(key, blockData);
        }
        containerLayoutTestInfo.validateFileCount(chunksPath, totalBlocks, totalBlocks * CHUNKS_PER_BLOCK);
    }
    return container;
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChecksumData(org.apache.hadoop.ozone.common.ChecksumData) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) ArrayList(java.util.ArrayList) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB) Checksum(org.apache.hadoop.ozone.common.Checksum) BlockID(org.apache.hadoop.hdds.client.BlockID) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) File(java.io.File)
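A minimal sketch of exercising this helper; the container id and block counts are arbitrary, and the key formats mirror those written above (plain local ID for normal blocks, DELETING_KEY_PREFIX for deleted ones):

KeyValueContainer container = createContainerWithBlocks(1L, 3, 2);
try (ReferenceCountedDB store = BlockUtils.getDB(container.getContainerData(), conf)) {
    Table<String, BlockData> blocks = store.getStore().getBlockDataTable();
    // Local IDs 0..2 were stored as normal keys, 3..4 with the deleting prefix.
    Assert.assertNotNull(blocks.get("0"));
    Assert.assertNotNull(blocks.get(OzoneConsts.DELETING_KEY_PREFIX + "3"));
}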

Example 14 with BlockData

use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

the class TestContainerPersistence method testPutBlockWithInvalidBCSId.

/**
 * Tests putting a block and reading it back with an invalid bcsId.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithInvalidBCSId() throws IOException, NoSuchAlgorithmException {
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
    ChunkInfo info = writeChunkHelper(blockID1);
    BlockData blockData = new BlockData(blockID1);
    List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    blockData.setBlockCommitSequenceId(3);
    blockManager.putBlock(container, blockData);
    chunkList.clear();
    // write a 2nd block
    BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
    info = writeChunkHelper(blockID2);
    blockData = new BlockData(blockID2);
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    blockData.setBlockCommitSequenceId(4);
    blockManager.putBlock(container, blockData);
    BlockData readBlockData;
    try {
        blockID1.setBlockCommitSequenceId(5);
        // read with bcsId higher than container bcsId
        blockManager.getBlock(container, blockID1);
        Assert.fail("Expected exception not thrown");
    } catch (StorageContainerException sce) {
        Assert.assertEquals(UNKNOWN_BCSID, sce.getResult());
    }
    try {
        blockID1.setBlockCommitSequenceId(4);
        // read with bcsId lower than container bcsId but greater than committed
        // bcsId.
        blockManager.getBlock(container, blockID1);
        Assert.fail("Expected exception not thrown");
    } catch (StorageContainerException sce) {
        Assert.assertEquals(BCSID_MISMATCH, sce.getResult());
    }
    readBlockData = blockManager.getBlock(container, blockData.getBlockID());
    ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
    Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
}
Also used : KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) LinkedList(java.util.LinkedList) Test(org.junit.Test)
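The catch-and-fail pattern above can be written more compactly with assertThrows; a sketch assuming JUnit 4.13+ is on the classpath:

blockID1.setBlockCommitSequenceId(5);
// assertThrows returns the thrown exception, so the result code can be checked directly.
StorageContainerException sce = Assert.assertThrows(StorageContainerException.class,
    () -> blockManager.getBlock(container, blockID1));
Assert.assertEquals(UNKNOWN_BCSID, sce.getResult());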

Example 15 with BlockData

use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

the class TestContainerReader method markBlocksForDelete.

private void markBlocksForDelete(KeyValueContainer keyValueContainer, boolean setMetaData, List<Long> blockNames, int count) throws Exception {
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
        Table<String, BlockData> blockDataTable = metadataStore.getStore().getBlockDataTable();
        for (int i = 0; i < count; i++) {
            String blk = Long.toString(blockNames.get(i));
            BlockData blkInfo = blockDataTable.get(blk);
            blockDataTable.delete(blk);
            blockDataTable.put(OzoneConsts.DELETING_KEY_PREFIX + blk, blkInfo);
        }
        if (setMetaData) {
            // Pending delete blocks are still counted towards the block count
            // and bytes used metadata values, so those do not change.
            Table<String, Long> metadataTable = metadataStore.getStore().getMetadataTable();
            metadataTable.put(OzoneConsts.PENDING_DELETE_BLOCK_COUNT, (long) count);
        }
    }
}
Also used : ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
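A hedged usage sketch: mark the first five blocks for deletion, then read the pending-delete counter back through the same metadata table (blockNames is assumed to hold the local IDs written earlier in the test):

markBlocksForDelete(keyValueContainer, true, blockNames, 5);
try (ReferenceCountedDB store = BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
    // PENDING_DELETE_BLOCK_COUNT was persisted because setMetaData was true.
    Long pending = store.getStore().getMetadataTable().get(OzoneConsts.PENDING_DELETE_BLOCK_COUNT);
    Assert.assertEquals(Long.valueOf(5), pending);
}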

Aggregations

BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 47 uses
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 22 uses
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 22 uses
BlockID (org.apache.hadoop.hdds.client.BlockID): 21 uses
Test (org.junit.Test): 18 uses
ArrayList (java.util.ArrayList): 15 uses
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 12 uses
IOException (java.io.IOException): 11 uses
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 11 uses
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 8 uses
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8 uses
LinkedList (java.util.LinkedList): 7 uses
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 7 uses
File (java.io.File): 6 uses
Table (org.apache.hadoop.hdds.utils.db.Table): 5 uses
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 5 uses
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 5 uses
List (java.util.List): 4 uses
ByteBuffer (java.nio.ByteBuffer): 3 uses
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3 uses