
Example 16 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

Class TestBlockManagerImpl, method testPutAndGetBlock.

@Test
public void testPutAndGetBlock() throws Exception {
    assertEquals(0, keyValueContainer.getContainerData().getBlockCount());
    // Put Block
    blockManager.putBlock(keyValueContainer, blockData);
    assertEquals(1, keyValueContainer.getContainerData().getBlockCount());
    // Get Block
    BlockData fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData.getBlockID());
    assertEquals(blockData.getContainerID(), fromGetBlockData.getContainerID());
    assertEquals(blockData.getLocalID(), fromGetBlockData.getLocalID());
    assertEquals(blockData.getChunks().size(), fromGetBlockData.getChunks().size());
    assertEquals(blockData.getMetadata().size(), fromGetBlockData.getMetadata().size());
}
Also used: BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), Test (org.junit.Test)
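
The assertions above compare only the sizes of the chunk list and the metadata map. Assuming the round trip through the block manager preserves their contents exactly, a stricter (hypothetical) follow-up check could compare the collections themselves:

// Hypothetical stricter assertions: compare contents, not just sizes.
assertEquals(blockData.getChunks(), fromGetBlockData.getChunks());
assertEquals(blockData.getMetadata(), fromGetBlockData.getMetadata());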

Example 17 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

Class TestBlockManagerImpl, method setUp.

@Before
public void setUp() throws Exception {
    config = new OzoneConfiguration();
    UUID datanodeId = UUID.randomUUID();
    HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath())
        .conf(config)
        .datanodeUuid(datanodeId.toString())
        .build();
    volumeSet = mock(MutableVolumeSet.class);
    volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
    Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
    keyValueContainerData = new KeyValueContainerData(1L, layout,
        (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(),
        datanodeId.toString());
    keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
    keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
    // Create the first BlockData; its bcsId is left at the default of 0
    blockID = new BlockID(1L, 1L);
    blockData = new BlockData(blockID);
    blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
    blockData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    // Create a second BlockData with a non-zero bcsId
    blockID1 = new BlockID(1L, 2L);
    blockData1 = new BlockData(blockID1);
    blockData1.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
    blockData1.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
    List<ContainerProtos.ChunkInfo> chunkList1 = new ArrayList<>();
    ChunkInfo info1 = new ChunkInfo(String.format("%d.data.%d", blockID1.getLocalID(), 0), 0, 1024);
    chunkList1.add(info1.getProtoBufMessage());
    blockData1.setChunks(chunkList1);
    blockData1.setBlockCommitSequenceId(1);
    // Create BlockManagerImpl
    blockManager = new BlockManagerImpl(config);
}
Also used: RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ArrayList (java.util.ArrayList), OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume), BlockID (org.apache.hadoop.hdds.client.BlockID), MutableVolumeSet (org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet), UUID (java.util.UUID), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Before (org.junit.Before)
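
The two BlockData fixtures above are built with identical steps. As a sketch only (the helper name createBlockData, its throws clause, and the surrounding imports are assumptions, not part of the Ozone test), the construction could be factored into a small helper that uses exactly the calls shown in setUp:

// Hypothetical helper mirroring the BlockData construction in setUp above.
private static BlockData createBlockData(long containerId, long localId,
        long bcsId) throws IOException {
    BlockID id = new BlockID(containerId, localId);
    BlockData data = new BlockData(id);
    data.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
    data.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    // One 1024-byte chunk named "<localId>.data.0", starting at offset 0.
    ChunkInfo info = new ChunkInfo(
            String.format("%d.data.%d", id.getLocalID(), 0), 0, 1024);
    chunkList.add(info.getProtoBufMessage());
    data.setChunks(chunkList);
    // Passing 0 here matches the default bcsId used for blockData above.
    data.setBlockCommitSequenceId(bcsId);
    return data;
}

With such a helper, the two fixtures reduce to blockData = createBlockData(1L, 1L, 0) and blockData1 = createBlockData(1L, 2L, 1).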

Example 18 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

Class TestBlockManagerImpl, method testPutBlock.

@Test
public void testPutBlock() throws Exception {
    assertEquals(0, keyValueContainer.getContainerData().getBlockCount());
    // Put Block with bcsId != 0
    blockManager.putBlock(keyValueContainer, blockData1);
    BlockData fromGetBlockData;
    // Check Container's bcsId
    fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData1.getBlockID());
    assertEquals(1, keyValueContainer.getContainerData().getBlockCount());
    assertEquals(1, keyValueContainer.getContainerData().getBlockCommitSequenceId());
    assertEquals(1, fromGetBlockData.getBlockCommitSequenceId());
    // Put Block with bcsId == 0
    blockManager.putBlock(keyValueContainer, blockData);
    // Check Container's bcsId
    fromGetBlockData = blockManager.getBlock(keyValueContainer, blockData.getBlockID());
    assertEquals(2, keyValueContainer.getContainerData().getBlockCount());
    assertEquals(0, fromGetBlockData.getBlockCommitSequenceId());
    assertEquals(1, keyValueContainer.getContainerData().getBlockCommitSequenceId());
}
Also used: BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), Test (org.junit.Test)
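
As a hypothetical extension (not part of the snippet above), a third put with a higher bcsId should advance the container-level bcsId again, since the container keeps the highest bcsId it has seen; createBlockData refers to the helper sketched after Example 17:

// Hypothetical extension: a higher bcsId advances the container-level bcsId.
BlockData blockData2 = createBlockData(1L, 3L, 5);
blockManager.putBlock(keyValueContainer, blockData2);
assertEquals(3, keyValueContainer.getContainerData().getBlockCount());
assertEquals(5, keyValueContainer.getContainerData().getBlockCommitSequenceId());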

Example 19 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

Class TestKeyValueBlockIterator, method testKeyValueBlockIteratorWithMixedBlocks.

@Test
public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception {
    int deletingBlocks = 5;
    int normalBlocks = 5;
    Map<String, List<Long>> blockIDs = createContainerWithBlocks(CONTAINER_ID, normalBlocks, deletingBlocks);
    // The default filter returns all unprefixed blocks.
    List<Long> unprefixedBlockIDs = blockIDs.get("");
    try (BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator()) {
        Iterator<Long> blockIDIter = unprefixedBlockIDs.iterator();
        while (keyValueBlockIterator.hasNext()) {
            BlockData blockData = keyValueBlockIterator.nextBlock();
            assertEquals(blockData.getLocalID(), (long) blockIDIter.next());
        }
        assertFalse(keyValueBlockIterator.hasNext());
        assertFalse(blockIDIter.hasNext());
        keyValueBlockIterator.seekToFirst();
        blockIDIter = unprefixedBlockIDs.iterator();
        while (keyValueBlockIterator.hasNext()) {
            BlockData blockData = keyValueBlockIterator.nextBlock();
            assertEquals(blockData.getLocalID(), (long) blockIDIter.next());
        }
        assertFalse(keyValueBlockIterator.hasNext());
        assertFalse(blockIDIter.hasNext());
        try {
            keyValueBlockIterator.nextBlock();
        } catch (NoSuchElementException ex) {
            GenericTestUtils.assertExceptionContains("Block Iterator reached end for ContainerID " + CONTAINER_ID, ex);
        }
    }
}
Also used: ArrayList (java.util.ArrayList), List (java.util.List), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), NoSuchElementException (java.util.NoSuchElementException), Test (org.junit.Test)
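
The two identical verification loops above could be collapsed with a small helper that drains the iterator into a list of local IDs. The sketch below uses only the iterator methods shown in the snippet; the helper name drainLocalIds and its throws clause are assumptions:

// Hypothetical helper: collect the local IDs of all blocks left in the iterator.
private static List<Long> drainLocalIds(BlockIterator<BlockData> iterator)
        throws IOException {
    List<Long> localIds = new ArrayList<>();
    while (iterator.hasNext()) {
        localIds.add(iterator.nextBlock().getLocalID());
    }
    return localIds;
}

Each pass then reduces to assertEquals(unprefixedBlockIDs, drainLocalIds(keyValueBlockIterator)), with keyValueBlockIterator.seekToFirst() between the two passes.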

Example 20 with BlockData

Use of org.apache.hadoop.ozone.container.common.helpers.BlockData in project ozone by apache.

Class CommonChunkManagerTestCases, method testWriteReadChunk.

@Test
public void testWriteReadChunk() throws Exception {
    // GIVEN
    ChunkManager chunkManager = createTestSubject();
    checkWriteIOStats(0, 0);
    DispatcherContext dispatcherContext = getDispatcherContext();
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    ChunkInfo chunkInfo = getChunkInfo();
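    // WHEN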
    chunkManager.writeChunk(container, blockID, chunkInfo, getData(), dispatcherContext);
    checkWriteIOStats(chunkInfo.getLen(), 1);
    checkReadIOStats(0, 0);
    BlockData blockData = new BlockData(blockID);
    blockData.addChunk(chunkInfo.getProtoBufMessage());
    getBlockManager().putBlock(container, blockData);
    ByteBuffer expectedData = chunkManager
        .readChunk(container, blockID, chunkInfo, dispatcherContext)
        .toByteString().asReadOnlyByteBuffer();
    // THEN
    assertEquals(chunkInfo.getLen(), expectedData.remaining());
    assertEquals(expectedData.rewind(), rewindBufferToDataStart());
    checkReadIOStats(expectedData.limit(), 1);
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext), BlockID (org.apache.hadoop.hdds.client.BlockID), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), ByteBuffer (java.nio.ByteBuffer), ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Test (org.junit.Test)

Aggregations

BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 47
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 22
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 22
BlockID (org.apache.hadoop.hdds.client.BlockID): 21
Test (org.junit.Test): 18
ArrayList (java.util.ArrayList): 15
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 12
IOException (java.io.IOException): 11
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 11
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 8
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8
LinkedList (java.util.LinkedList): 7
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 7
File (java.io.File): 6
Table (org.apache.hadoop.hdds.utils.db.Table): 5
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 5
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 5
List (java.util.List): 4
ByteBuffer (java.nio.ByteBuffer): 3
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3