Usage example of org.apache.hadoop.ozone.container.common.helpers.BlockData in the Apache Ozone project: class TestBlockManagerImpl, method testPutAndGetBlock.
@Test
public void testPutAndGetBlock() throws Exception {
  // The freshly created container starts out empty.
  assertEquals(0, keyValueContainer.getContainerData().getBlockCount());
  // Persist the block and confirm the container's block count ticked up.
  blockManager.putBlock(keyValueContainer, blockData);
  assertEquals(1, keyValueContainer.getContainerData().getBlockCount());
  // Read the block back by its ID and verify the round-tripped fields
  // match what was stored.
  BlockData readBack = blockManager.getBlock(keyValueContainer, blockData.getBlockID());
  assertEquals(blockData.getContainerID(), readBack.getContainerID());
  assertEquals(blockData.getLocalID(), readBack.getLocalID());
  assertEquals(blockData.getChunks().size(), readBack.getChunks().size());
  assertEquals(blockData.getMetadata().size(), readBack.getMetadata().size());
}
Usage example of org.apache.hadoop.ozone.container.common.helpers.BlockData in the Apache Ozone project: class TestBlockManagerImpl, method setUp.
@Before
public void setUp() throws Exception {
  config = new OzoneConfiguration();
  UUID datanodeId = UUID.randomUUID();
  // Build a real volume on the temp folder; the volume set and choosing
  // policy are mocked so container creation always lands on this volume.
  HddsVolume hddsVolume = new HddsVolume.Builder(folder.getRoot().getAbsolutePath()).conf(config).datanodeUuid(datanodeId.toString()).build();
  volumeSet = mock(MutableVolumeSet.class);
  volumeChoosingPolicy = mock(RoundRobinVolumeChoosingPolicy.class);
  Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong())).thenReturn(hddsVolume);
  keyValueContainerData = new KeyValueContainerData(1L, layout, (long) StorageUnit.GB.toBytes(5), UUID.randomUUID().toString(), datanodeId.toString());
  keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
  keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
  // Two blocks in container 1: blockData keeps the default bcsId (0),
  // blockData1 carries bcsId 1 so tests can exercise both paths.
  blockID = new BlockID(1L, 1L);
  blockData = newBlockData(blockID);
  blockID1 = new BlockID(1L, 2L);
  blockData1 = newBlockData(blockID1);
  blockData1.setBlockCommitSequenceId(1);
  // Create KeyValueContainerManager
  blockManager = new BlockManagerImpl(config);
}

/**
 * Builds a BlockData fixture for the given block ID with the standard
 * volume/owner metadata and a single 1024-byte chunk named
 * "<localID>.data.0". Factored out of setUp() to remove the duplicated
 * construction sequence for blockData and blockData1.
 */
private static BlockData newBlockData(BlockID id) {
  BlockData data = new BlockData(id);
  data.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
  data.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
  List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
  ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", id.getLocalID(), 0), 0, 1024);
  chunkList.add(info.getProtoBufMessage());
  data.setChunks(chunkList);
  return data;
}
Usage example of org.apache.hadoop.ozone.container.common.helpers.BlockData in the Apache Ozone project: class TestBlockManagerImpl, method testPutBlock.
@Test
public void testPutBlock() throws Exception {
  // Container starts empty.
  assertEquals(0, keyValueContainer.getContainerData().getBlockCount());
  // Store a block whose bcsId is non-zero (blockData1 has bcsId == 1);
  // the container's bcsId should advance to match it.
  blockManager.putBlock(keyValueContainer, blockData1);
  BlockData stored = blockManager.getBlock(keyValueContainer, blockData1.getBlockID());
  assertEquals(1, stored.getBlockCommitSequenceId());
  assertEquals(1, keyValueContainer.getContainerData().getBlockCount());
  assertEquals(1, keyValueContainer.getContainerData().getBlockCommitSequenceId());
  // Store a block with the default bcsId of 0; the block count grows,
  // but the container's bcsId must NOT regress below 1.
  blockManager.putBlock(keyValueContainer, blockData);
  stored = blockManager.getBlock(keyValueContainer, blockData.getBlockID());
  assertEquals(0, stored.getBlockCommitSequenceId());
  assertEquals(2, keyValueContainer.getContainerData().getBlockCount());
  assertEquals(1, keyValueContainer.getContainerData().getBlockCommitSequenceId());
}
Usage example of org.apache.hadoop.ozone.container.common.helpers.BlockData in the Apache Ozone project: class TestKeyValueBlockIterator, method testKeyValueBlockIteratorWithMixedBlocks.
@Test
public void testKeyValueBlockIteratorWithMixedBlocks() throws Exception {
  int deletingBlocks = 5;
  int normalBlocks = 5;
  Map<String, List<Long>> blockIDs = createContainerWithBlocks(CONTAINER_ID, normalBlocks, deletingBlocks);
  // Default filter used is all unprefixed blocks.
  List<Long> unprefixedBlockIDs = blockIDs.get("");
  try (BlockIterator<BlockData> keyValueBlockIterator = db.getStore().getBlockIterator()) {
    // First pass: the iterator must yield exactly the unprefixed block
    // IDs, in the same order they were created.
    Iterator<Long> blockIDIter = unprefixedBlockIDs.iterator();
    while (keyValueBlockIterator.hasNext()) {
      BlockData blockData = keyValueBlockIterator.nextBlock();
      assertEquals(blockData.getLocalID(), (long) blockIDIter.next());
    }
    assertFalse(keyValueBlockIterator.hasNext());
    assertFalse(blockIDIter.hasNext());
    // Second pass after seekToFirst(): the iterator must replay the
    // same sequence from the beginning.
    keyValueBlockIterator.seekToFirst();
    blockIDIter = unprefixedBlockIDs.iterator();
    while (keyValueBlockIterator.hasNext()) {
      BlockData blockData = keyValueBlockIterator.nextBlock();
      assertEquals(blockData.getLocalID(), (long) blockIDIter.next());
    }
    assertFalse(keyValueBlockIterator.hasNext());
    assertFalse(blockIDIter.hasNext());
    // Calling nextBlock() past the end must throw. The original test
    // passed silently when no exception was thrown; the AssertionError
    // after the call closes that hole (it is not caught by the
    // NoSuchElementException handler, so it propagates and fails the test).
    try {
      keyValueBlockIterator.nextBlock();
      throw new AssertionError("nextBlock() past the end should have thrown NoSuchElementException");
    } catch (NoSuchElementException ex) {
      GenericTestUtils.assertExceptionContains("Block Iterator reached end " + "for ContainerID " + CONTAINER_ID, ex);
    }
  }
}
Usage example of org.apache.hadoop.ozone.container.common.helpers.BlockData in the Apache Ozone project: class CommonChunkManagerTestCases, method testWriteReadChunk.
@Test
public void testWriteReadChunk() throws Exception {
  // GIVEN a fresh chunk manager with zeroed write stats.
  ChunkManager chunkManager = createTestSubject();
  checkWriteIOStats(0, 0);
  DispatcherContext dispatcherContext = getDispatcherContext();
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = getChunkInfo();
  // WHEN the chunk is written, registered on a block, and read back.
  chunkManager.writeChunk(container, blockID, chunkInfo, getData(), dispatcherContext);
  checkWriteIOStats(chunkInfo.getLen(), 1);
  checkReadIOStats(0, 0);
  BlockData blockData = new BlockData(blockID);
  blockData.addChunk(chunkInfo.getProtoBufMessage());
  getBlockManager().putBlock(container, blockData);
  ByteBuffer readBack = chunkManager.readChunk(container, blockID, chunkInfo, dispatcherContext).toByteString().asReadOnlyByteBuffer();
  // THEN the data read back has the written length, matches the
  // original bytes, and the read stats reflect one read of that size.
  assertEquals(chunkInfo.getLen(), readBack.remaining());
  assertEquals(readBack.rewind(), rewindBufferToDataStart());
  checkReadIOStats(readBack.limit(), 1);
}
Aggregations