Use of org.apache.hadoop.ozone.container.common.interfaces.Container in project ozone by apache.
From the class TestContainerPersistence, method testPutBlockWithLotsOfChunks.
/**
 * Tests putting a block that spans multiple chunks and reading it back.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithLotsOfChunks() throws IOException,
    NoSuchAlgorithmException {
  final int chunkCount = 2;
  final int datalen = 1024;
  long totalSize = 0L;
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  List<ChunkInfo> chunkList = new LinkedList<>();
  for (int x = 0; x < chunkCount; x++) {
    // Each chunk is written into the same block file at offset x * datalen.
    ChunkInfo info = new ChunkInfo(
        String.format("%d.data", blockID.getLocalID()), x * datalen, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data,
        getDispatcherContext());
    totalSize += datalen;
    chunkList.add(info);
  }
  // Verify the container-level stats reflect the writes and no reads.
  long bytesUsed = container.getContainerData().getBytesUsed();
  Assert.assertEquals(totalSize, bytesUsed);
  long writeBytes = container.getContainerData().getWriteBytes();
  Assert.assertEquals(chunkCount * datalen, writeBytes);
  long readCount = container.getContainerData().getReadCount();
  Assert.assertEquals(0, readCount);
  long writeCount = container.getContainerData().getWriteCount();
  Assert.assertEquals(chunkCount, writeCount);
  // Commit the block metadata, read it back, and compare the last chunk's
  // checksum with what was written.
  BlockData blockData = new BlockData(blockID);
  List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
  for (ChunkInfo i : chunkList) {
    chunkProtoList.add(i.getProtoBufMessage());
  }
  blockData.setChunks(chunkProtoList);
  blockManager.putBlock(container, blockData);
  BlockData readBlockData = blockManager.getBlock(container,
      blockData.getBlockID());
  ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
  ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(
      readBlockData.getChunks().get(readBlockData.getChunks().size() - 1));
  Assert.assertEquals(lastChunk.getChecksumData(), readChunk.getChecksumData());
}
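The getData and setDataChecksum calls are helpers defined in the test class itself. A minimal sketch of what a checksum helper like this might do with Ozone's Checksum utility; the CRC32 type, the bytes-per-checksum value, and the byte[] overload are assumptions for illustration, not the test's actual body:

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.ozone.common.Checksum;
import org.apache.hadoop.ozone.common.OzoneChecksumException;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

// Sketch: compute a ChecksumData over the chunk payload and attach it to the
// ChunkInfo, so the read path can verify the data later.
static void setDataChecksum(ChunkInfo info, byte[] data)
    throws OzoneChecksumException {
  Checksum checksum = new Checksum(ChecksumType.CRC32, 1024 * 1024);
  info.setChecksumData(checksum.computeChecksum(data));
}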
Use of org.apache.hadoop.ozone.container.common.interfaces.Container in project ozone by apache.
From the class TestContainerPersistence, method testDeleteContainer.
@Test
public void testDeleteContainer() throws Exception {
  long testContainerID1 = getTestContainerID();
  Thread.sleep(100);
  long testContainerID2 = getTestContainerID();
  Container container1 = addContainer(containerSet, testContainerID1);
  container1.close();
  Container container2 = addContainer(containerSet, testContainerID2);
  Assert.assertTrue(containerSet.getContainerMapCopy()
      .containsKey(testContainerID1));
  Assert.assertTrue(containerSet.getContainerMapCopy()
      .containsKey(testContainerID2));
  container1.delete();
  containerSet.removeContainer(testContainerID1);
  Assert.assertFalse(containerSet.getContainerMapCopy()
      .containsKey(testContainerID1));
  // Adding a block to a deleted container should fail.
  exception.expect(StorageContainerException.class);
  exception.expectMessage("Error opening DB.");
  BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID1);
  BlockData someKey1 = new BlockData(blockID1);
  someKey1.setChunks(new LinkedList<>());
  blockManager.putBlock(container1, someKey1);
  // Deleting a non-empty container should fail. Note that the putBlock above
  // is expected to throw, so under the ExpectedException rule the test method
  // ends there and the lines below never actually execute.
  BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID2);
  BlockData someKey2 = new BlockData(blockID2);
  someKey2.setChunks(new LinkedList<>());
  blockManager.putBlock(container2, someKey2);
  exception.expect(StorageContainerException.class);
  exception.expectMessage("Container cannot be deleted because it is not empty.");
  container2.delete();
  Assert.assertTrue(containerSet.getContainerMapCopy()
      .containsKey(testContainerID2));
}
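Because the ExpectedException rule passes the test as soon as the first matching exception propagates, the non-empty-delete path above is effectively unreachable. A sketch of the same two checks written with Assert.assertThrows (available from JUnit 4.13), reusing the variables from the test above, which keeps both failure cases live in one method:

// Both failure paths exercised explicitly; expectMessage does substring
// matching, mirrored here with contains().
StorageContainerException e1 = Assert.assertThrows(
    StorageContainerException.class,
    () -> blockManager.putBlock(container1, someKey1));
Assert.assertTrue(e1.getMessage().contains("Error opening DB."));

StorageContainerException e2 = Assert.assertThrows(
    StorageContainerException.class,
    () -> container2.delete());
Assert.assertTrue(e2.getMessage().contains(
    "Container cannot be deleted because it is not empty."));
Assert.assertTrue(containerSet.getContainerMapCopy()
    .containsKey(testContainerID2));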
Use of org.apache.hadoop.ozone.container.common.interfaces.Container in project ozone by apache.
From the class TestHddsDispatcher, method testContainerCloseActionWhenFull.
@Test
public void testContainerCloseActionWhenFull() throws IOException {
  String testDir = GenericTestUtils.getTempPath(
      TestHddsDispatcher.class.getSimpleName());
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.set(HDDS_DATANODE_DIR_KEY, testDir);
  conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
  DatanodeDetails dd = randomDatanodeDetails();
  MutableVolumeSet volumeSet = new MutableVolumeSet(dd.getUuidString(), conf,
      null, StorageVolume.VolumeType.DATA_VOLUME, null);
  try {
    UUID scmId = UUID.randomUUID();
    ContainerSet containerSet = new ContainerSet();
    DatanodeStateMachine stateMachine = Mockito.mock(DatanodeStateMachine.class);
    StateContext context = Mockito.mock(StateContext.class);
    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
    Mockito.when(context.getParent()).thenReturn(stateMachine);
    // A 1 GB container, nearly full once bytesUsed is bumped to 950 MB below.
    KeyValueContainerData containerData = new KeyValueContainerData(1L, layout,
        (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
        dd.getUuidString());
    Container container = new KeyValueContainer(containerData, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(),
        scmId.toString());
    containerSet.addContainer(container);
    ContainerMetrics metrics = ContainerMetrics.create(conf);
    Map<ContainerType, Handler> handlers = Maps.newHashMap();
    for (ContainerType containerType : ContainerType.values()) {
      handlers.put(containerType, Handler.getHandlerForContainerType(
          containerType, conf,
          context.getParent().getDatanodeDetails().getUuidString(),
          containerSet, volumeSet, metrics, NO_OP_ICR_SENDER));
    }
    HddsDispatcher hddsDispatcher = new HddsDispatcher(conf, containerSet,
        volumeSet, handlers, context, metrics, null);
    hddsDispatcher.setClusterId(scmId.toString());
    // First write: the container is far from full, so no close action.
    ContainerCommandResponseProto responseOne = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 1L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseOne.getResult());
    verify(context, times(0))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
    // Simulate a nearly full container; the next write should queue a close
    // action exactly once.
    containerData.setBytesUsed(
        Double.valueOf(StorageUnit.MB.toBytes(950)).longValue());
    ContainerCommandResponseProto responseTwo = hddsDispatcher.dispatch(
        getWriteChunkRequest(dd.getUuidString(), 1L, 2L), null);
    Assert.assertEquals(ContainerProtos.Result.SUCCESS, responseTwo.getResult());
    verify(context, times(1))
        .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
  } finally {
    volumeSet.shutdown();
    ContainerMetrics.remove();
    FileUtils.deleteDirectory(new File(testDir));
  }
}
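The close action fires when used space crosses a fullness threshold of the container's maximum size. A self-contained sketch of that check; the 0.9 value and the method shape are assumptions for illustration, not HddsDispatcher's actual code:

// Sketch: decide whether a write should trigger a "close container" action.
// 950 MB used of a 1 GB container exceeds an assumed 0.9 threshold, which is
// consistent with the behavior the test verifies.
static boolean shouldQueueCloseAction(long bytesUsed, long maxSizeBytes) {
  final double closeThreshold = 0.9; // assumed default, configurable in Ozone
  return bytesUsed >= (long) (closeThreshold * maxSizeBytes);
}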
Use of org.apache.hadoop.ozone.container.common.interfaces.Container in project ozone by apache.
From the class TestContainerPersistence, method testListContainer.
/**
 * This test creates 10 containers, reads them back 5 containers at a time,
 * and verifies that we get back all of them.
 *
 * @throws IOException
 */
@Test
public void testListContainer() throws IOException {
  final int count = 10;
  final int step = 5;
  Map<Long, ContainerData> testMap = new HashMap<>();
  for (int x = 0; x < count; x++) {
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    testMap.put(testContainerID, container.getContainerData());
  }
  int counter = 0;
  long prevKey = 0;
  List<ContainerData> results = new LinkedList<>();
  while (counter < count) {
    containerSet.listContainer(prevKey, step, results);
    for (int y = 0; y < results.size(); y++) {
      testMap.remove(results.get(y).getContainerID());
    }
    counter += step;
    long nextKey = results.get(results.size() - 1).getContainerID();
    // Assert that listContainer returns results in sorted order.
    Assert.assertTrue(prevKey < nextKey);
    prevKey = nextKey + 1;
    results.clear();
  }
  // Assert that we listed every container we had put into the set.
  Assert.assertTrue(testMap.isEmpty());
}
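The loop relies on a pagination contract for ContainerSet.listContainer: each call fills the results list with up to step containers whose IDs are at or above prevKey, in ascending order (the at-or-above semantics are inferred from the prevKey = nextKey + 1 step). A sketch of draining the whole set under that assumed contract:

// Sketch: collect every container page by page until a short or empty page.
static List<ContainerData> listAll(ContainerSet containerSet, int pageSize)
    throws IOException {
  List<ContainerData> all = new ArrayList<>();
  long startId = 0;
  while (true) {
    List<ContainerData> page = new ArrayList<>();
    containerSet.listContainer(startId, pageSize, page);
    if (page.isEmpty()) {
      break;
    }
    all.addAll(page);
    // Start the next page just past the highest ID seen so far.
    startId = page.get(page.size() - 1).getContainerID() + 1;
    if (page.size() < pageSize) {
      break; // short page: nothing left to list
    }
  }
  return all;
}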
Use of org.apache.hadoop.ozone.container.common.interfaces.Container in project ozone by apache.
From the class TestContainerPersistence, method testPutBlockWithInvalidBCSId.
/**
 * Tests a put block and read block with an invalid bcsId.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithInvalidBCSId() throws IOException,
    NoSuchAlgorithmException {
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID1 = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = writeChunkHelper(blockID1);
  BlockData blockData = new BlockData(blockID1);
  List<ContainerProtos.ChunkInfo> chunkList = new LinkedList<>();
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(3);
  blockManager.putBlock(container, blockData);
  chunkList.clear();
  // Write a second block; this raises the container's bcsId to 4.
  BlockID blockID2 = ContainerTestHelper.getTestBlockID(testContainerID);
  info = writeChunkHelper(blockID2);
  blockData = new BlockData(blockID2);
  chunkList.add(info.getProtoBufMessage());
  blockData.setChunks(chunkList);
  blockData.setBlockCommitSequenceId(4);
  blockManager.putBlock(container, blockData);
  BlockData readBlockData;
  try {
    blockID1.setBlockCommitSequenceId(5);
    // Read with a bcsId higher than the container's bcsId.
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertEquals(UNKNOWN_BCSID, sce.getResult());
  }
  try {
    blockID1.setBlockCommitSequenceId(4);
    // Read with a bcsId lower than the container's bcsId but greater than
    // the block's committed bcsId.
    blockManager.getBlock(container, blockID1);
    Assert.fail("Expected exception not thrown");
  } catch (StorageContainerException sce) {
    Assert.assertEquals(BCSID_MISMATCH, sce.getResult());
  }
  readBlockData = blockManager.getBlock(container, blockData.getBlockID());
  ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(0));
  Assert.assertEquals(info.getChecksumData(), readChunk.getChecksumData());
}
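The two failure modes reduce to an ordering check on block-commit sequence IDs. A self-contained sketch of the validation the test exercises; the method, enum, and parameter names are illustrative, not BlockManager's actual code:

// Sketch: classify a read request's bcsId against the container's highest
// committed bcsId and the target block's own committed bcsId.
enum BcsIdCheck { SUCCESS, UNKNOWN_BCSID, BCSID_MISMATCH }

static BcsIdCheck validateBcsId(long requested, long containerBcsId,
    long blockBcsId) {
  if (requested > containerBcsId) {
    // e.g. reading with bcsId 5 when the container has only committed up to 4
    return BcsIdCheck.UNKNOWN_BCSID;
  }
  if (requested > blockBcsId) {
    // e.g. reading a block committed at bcsId 3 with a requested bcsId of 4
    return BcsIdCheck.BCSID_MISMATCH;
  }
  return BcsIdCheck.SUCCESS;
}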