Use of org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager in project ozone by apache.
The class TestChunkManagerDummyImpl, method dummyManagerReadsAnyChunk.
@Test
public void dummyManagerReadsAnyChunk() throws Exception {
  ChunkManager dummy = createTestSubject();

  ChunkBuffer dataRead = dummy.readChunk(getKeyValueContainer(), getBlockID(),
      getChunkInfo(), getDispatcherContext());

  assertNotNull(dataRead);
}
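The point of the dummy subject is that readChunk returns data for any chunk, even one that was never written. As a minimal sketch of that contract, assuming the dummy simply fabricates an in-memory buffer of the requested length (the class and method names below are illustrative, not the actual ChunkManagerDummyImpl):

import java.nio.ByteBuffer;

// Illustrative sketch only: a "dummy" read that succeeds for any chunk by
// returning a zero-filled buffer of the requested length, never null.
final class DummyChunkReader {
  // 'length' stands in for ChunkInfo.getLen(); kept as a plain long so the
  // sketch stays self-contained.
  ByteBuffer readAnyChunk(long length) {
    return ByteBuffer.allocate((int) length); // zero-filled, non-null
  }
}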
Use of org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager in project ozone by apache.
The class TestFilePerChunkStrategy, method deletesChunkFileWithLengthIncludingOffset.
/**
 * Tests that a "new datanode" can delete chunks written to an "old
 * datanode" by a "new client" (i.e. where the chunk file was accidentally
 * created with {@code size = chunk offset + chunk length}, instead of only
 * chunk length).
 */
@Test
public void deletesChunkFileWithLengthIncludingOffset() throws Exception {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = getChunkInfo();
  long offset = 1024;
  ChunkInfo oldDatanodeChunkInfo = new ChunkInfo(chunkInfo.getChunkName(),
      offset, chunkInfo.getLen());
  File file = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(
      container.getContainerData(), blockID, chunkInfo);
  ChunkUtils.writeData(file, ChunkBuffer.wrap(getData()), offset,
      chunkInfo.getLen(), null, true);
  checkChunkFileCount(1);
  assertTrue(file.exists());
  assertEquals(offset + chunkInfo.getLen(), file.length());

  // WHEN
  chunkManager.deleteChunk(container, blockID, oldDatanodeChunkInfo);

  // THEN
  checkChunkFileCount(0);
  assertFalse(file.exists());
}
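The compatibility case above boils down to a length check: a correctly written FILE_PER_CHUNK file is exactly chunk-length bytes, while the buggy legacy file is offset + length bytes, and deletion must accept both. A self-contained sketch of that check (the helper name is hypothetical; offset and len stand in for ChunkInfo.getOffset() and ChunkInfo.getLen()):

import java.io.File;

// Illustrative sketch: decide whether an on-disk chunk file matches either
// the expected length or the buggy offset-inclusive length described above.
final class ChunkFileLengthCheck {
  static boolean isDeletableChunkFile(File file, long offset, long len) {
    long actual = file.length();
    return actual == len            // written correctly (chunk length only)
        || actual == offset + len;  // legacy file: size = offset + length
  }
}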
Use of org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager in project ozone by apache.
The class CommonChunkManagerTestCases, method testReadOversizeChunk.
@Test
public void testReadOversizeChunk() throws IOException {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  DispatcherContext dispatcherContext = getDispatcherContext();
  KeyValueContainer container = getKeyValueContainer();
  int tooLarge = OZONE_SCM_CHUNK_MAX_SIZE + 1;
  byte[] array = RandomStringUtils.randomAscii(tooLarge).getBytes(UTF_8);
  assertTrue(array.length >= tooLarge);
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = new ChunkInfo(
      String.format("%d.data.%d", blockID.getLocalID(), 0), 0, array.length);

  // write chunk bypassing size limit
  File chunkFile = getStrategy().getLayout().getChunkFile(
      getKeyValueContainerData(), blockID, chunkInfo);
  FileUtils.writeByteArrayToFile(chunkFile, array);

  // WHEN + THEN
  assertThrows(StorageContainerException.class,
      () -> chunkManager.readChunk(container, blockID, chunkInfo, dispatcherContext));
}
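The guard this test exercises can be sketched as a plain size check before the read. In the sketch below, IOException stands in for StorageContainerException so the code stays self-contained, and the class, field, and method names are hypothetical; the maximum is assumed to mirror the configured OZONE_SCM_CHUNK_MAX_SIZE:

import java.io.IOException;

// Illustrative sketch of the guard the test expects readChunk to enforce:
// reject any chunk whose declared length exceeds the configured maximum.
final class ChunkSizeGuard {
  private final long maxChunkSize; // assumed to mirror OZONE_SCM_CHUNK_MAX_SIZE

  ChunkSizeGuard(long maxChunkSize) {
    this.maxChunkSize = maxChunkSize;
  }

  void checkLength(long declaredLen) throws IOException {
    if (declaredLen > maxChunkSize) {
      throw new IOException("Chunk length " + declaredLen
          + " exceeds the maximum of " + maxChunkSize + " bytes");
    }
  }
}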
Use of org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager in project ozone by apache.
The class CommonChunkManagerTestCases, method testWriteChunkStageCombinedData.
@Test
public void testWriteChunkStageCombinedData() throws Exception {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  checkChunkFileCount(0);
  checkWriteIOStats(0, 0);

  // WHEN
  chunkManager.writeChunk(getKeyValueContainer(), getBlockID(),
      getChunkInfo(), getData(), getDispatcherContext());

  // THEN
  checkChunkFileCount(1);
  checkWriteIOStats(getChunkInfo().getLen(), 1);
}
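checkWriteIOStats(len, 1) above asserts that a single combined-data write adds exactly the chunk length to the byte counter and one write operation. A minimal sketch of that style of accounting, assuming the stats simply accumulate bytes and operation counts (the class and method names here are hypothetical):

import java.util.concurrent.atomic.AtomicLong;

// Illustrative sketch of write IO accounting: one writeChunk call adds the
// chunk length to the byte counter and increments the op counter once.
final class WriteIoStats {
  private final AtomicLong bytesWritten = new AtomicLong();
  private final AtomicLong writeOps = new AtomicLong();

  void onChunkWritten(long chunkLen) {
    bytesWritten.addAndGet(chunkLen);
    writeOps.incrementAndGet();
  }

  long getBytesWritten() { return bytesWritten.get(); }
  long getWriteOps()     { return writeOps.get(); }
}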
Use of org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager in project ozone by apache.
The class CommonChunkManagerTestCases, method testWriteReadChunk.
@Test
public void testWriteReadChunk() throws Exception {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  checkWriteIOStats(0, 0);
  DispatcherContext dispatcherContext = getDispatcherContext();
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo chunkInfo = getChunkInfo();

  // WHEN
  chunkManager.writeChunk(container, blockID, chunkInfo, getData(),
      dispatcherContext);
  checkWriteIOStats(chunkInfo.getLen(), 1);
  checkReadIOStats(0, 0);
  BlockData blockData = new BlockData(blockID);
  blockData.addChunk(chunkInfo.getProtoBufMessage());
  getBlockManager().putBlock(container, blockData);
  ByteBuffer expectedData = chunkManager.readChunk(container, blockID,
      chunkInfo, dispatcherContext).toByteString().asReadOnlyByteBuffer();

  // THEN
  assertEquals(chunkInfo.getLen(), expectedData.remaining());
  assertEquals(expectedData.rewind(), rewindBufferToDataStart());
  checkReadIOStats(expectedData.limit(), 1);
}
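The final assertions rewind both buffers before comparing because ByteBuffer.equals only compares the bytes remaining between position and limit; after a full read, both positions sit at the limit and nothing would be compared. A self-contained illustration of that comparison pattern:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

// Illustrative round-trip check: ByteBuffer.equals compares the remaining
// bytes (position..limit), so both buffers are rewound before comparing.
final class RoundTripCheck {
  public static void main(String[] args) {
    ByteBuffer written = ByteBuffer.wrap("chunk-data".getBytes(StandardCharsets.UTF_8));
    ByteBuffer readBack = ByteBuffer.wrap("chunk-data".getBytes(StandardCharsets.UTF_8));

    // Consume both buffers, as a write/read cycle would.
    written.position(written.limit());
    readBack.position(readBack.limit());

    // Without rewinding, both buffers have zero remaining bytes; rewind
    // restores position 0 so the full contents are compared.
    System.out.println(written.rewind().equals(readBack.rewind())); // true
  }
}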