use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestContainerPersistence method testOverWrite.
/**
 * Writes a single chunk, tries to overwrite it without the overwrite flag,
 * then retries with the overwrite flag set.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testOverWrite() throws IOException, NoSuchAlgorithmException {
  final int datalen = 1024;
  long testContainerID = getTestContainerID();
  KeyValueContainer container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
  ChunkBuffer data = getData(datalen);
  setDataChecksum(info, data);
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  data.rewind();
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  data.rewind();
  // With the overwrite flag it should work now.
  info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
  long bytesUsed = container.getContainerData().getBytesUsed();
  Assert.assertEquals(datalen, bytesUsed);
  long bytesWrite = container.getContainerData().getWriteBytes();
  Assert.assertEquals(datalen * 3, bytesWrite);
}
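For reference, the overwrite step used above can be factored into a small helper. This is only a sketch built from the calls shown in the snippet (chunkManager, getDispatcherContext() and OzoneConsts.CHUNK_OVERWRITE come from the test class); the helper name itself is hypothetical.

// Hypothetical helper (sketch): re-write an already-written chunk by marking
// the ChunkInfo with the CHUNK_OVERWRITE metadata flag before calling
// writeChunk again, as the test does above.
private void overwriteChunk(KeyValueContainer container, BlockID blockID,
    ChunkInfo info, ChunkBuffer data) throws IOException {
  // Flag the request as an intentional overwrite of existing chunk data.
  info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
  // Reset the buffer position so the same payload is written from the start.
  data.rewind();
  chunkManager.writeChunk(container, blockID, info, data,
      getDispatcherContext());
}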
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestBlockManagerImpl method testListBlock.
@Test
public void testListBlock() throws Exception {
  blockManager.putBlock(keyValueContainer, blockData);
  List<BlockData> listBlockData = blockManager.listBlock(keyValueContainer, 1, 10);
  assertNotNull(listBlockData);
  assertTrue(listBlockData.size() == 1);
  for (long i = 2; i <= 10; i++) {
    blockID = new BlockID(1L, i);
    blockData = new BlockData(blockID);
    blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
    blockData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
    List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
    ChunkInfo info = new ChunkInfo(
        String.format("%d.data.%d", blockID.getLocalID(), 0), 0, 1024);
    chunkList.add(info.getProtoBufMessage());
    blockData.setChunks(chunkList);
    blockManager.putBlock(keyValueContainer, blockData);
  }
  listBlockData = blockManager.listBlock(keyValueContainer, 1, 10);
  assertNotNull(listBlockData);
  assertTrue(listBlockData.size() == 10);
}
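The same listBlock(container, startLocalID, count) call can be used to page through a container's blocks. The sketch below is an assumption-based illustration, not part of the test: it assumes listBlock returns up to count blocks whose local ID is >= startLocalID (consistent with the calls above) and that BlockData exposes getLocalID(); the helper name countBlocksInPages is hypothetical.

// Sketch: page through the container's blocks five at a time.
private long countBlocksInPages(KeyValueContainer container) throws IOException {
  long counted = 0;
  long nextLocalID = 1;
  List<BlockData> page;
  do {
    page = blockManager.listBlock(container, nextLocalID, 5);
    for (BlockData block : page) {
      counted++;
      // Advance the cursor past the last block seen in this page.
      nextLocalID = block.getLocalID() + 1;
    }
  } while (page.size() == 5);
  return counted;
}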
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestFilePerBlockStrategy method testDeletePartialChunkWithOffsetUnsupportedRequest.
@Test
public void testDeletePartialChunkWithOffsetUnsupportedRequest() {
  // GIVEN
  ChunkManager chunkManager = createTestSubject();
  try {
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    chunkManager.writeChunk(container, blockID, getChunkInfo(), getData(),
        getDispatcherContext());
    ChunkInfo chunkInfo = new ChunkInfo(
        String.format("%d.data.%d", blockID.getLocalID(), 0), 123,
        getChunkInfo().getLen());

    // WHEN
    chunkManager.deleteChunk(container, blockID, chunkInfo);

    // THEN
    fail("Deleting a partial chunk at a non-zero offset should have failed");
  } catch (StorageContainerException ex) {
    assertEquals(ContainerProtos.Result.UNSUPPORTED_REQUEST, ex.getResult());
  }
}
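By contrast, a delete request whose ChunkInfo starts at offset 0 and spans the full written length is the shape this file-per-block strategy is expected to accept. A minimal sketch under that assumption, reusing the getChunkInfo() helper that describes the chunk written above:

// Sketch (assumption, not asserted by the snippet above): deleting the whole
// chunk, i.e. a ChunkInfo with offset 0 and the full written length, should
// not raise UNSUPPORTED_REQUEST.
ChunkInfo wholeChunk = getChunkInfo();
chunkManager.deleteChunk(container, blockID, wholeChunk);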
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestFilePerBlockStrategy method testPartialRead.
/**
 * Tests a partial read within a single chunk.
 */
@Test
public void testPartialRead() throws Exception {
  final int datalen = 1024;
  final int start = datalen / 4;
  final int length = datalen / 2;
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, datalen);
  ChunkBuffer data = ContainerTestHelper.getData(datalen);
  setDataChecksum(info, data);
  DispatcherContext ctx = getDispatcherContext();
  ChunkManager subject = createTestSubject();
  subject.writeChunk(container, blockID, info, data, ctx);
  ChunkBuffer readData = subject.readChunk(container, blockID, info, ctx);
  // data will be ChunkBufferImplWithByteBuffer and readData will return
  // ChunkBufferImplWithByteBufferList. Hence, convert both to ByteStrings
  // before comparing.
  assertEquals(data.rewind().toByteString(), readData.rewind().toByteString());
  ChunkInfo info2 = getChunk(blockID.getLocalID(), 0, start, length);
  ChunkBuffer readData2 = subject.readChunk(container, blockID, info2, ctx);
  assertEquals(length, info2.getLen());
  assertEquals(data.rewind().toByteString().substring(start, start + length),
      readData2.rewind().toByteString());
}
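The getChunk(...) calls above are test helpers; the same sub-range request can also be expressed directly with the ChunkInfo constructor used elsewhere on this page (chunk name, offset, length). A sketch reusing the variables from the test; the "%d.data.%d" name format is carried over from the other snippets and is an assumption here, not something this test verifies.

// Sketch: a ChunkInfo that addresses only bytes [start, start + length) of the
// chunk written above, built with the raw constructor instead of getChunk(...).
ChunkInfo partial = new ChunkInfo(
    String.format("%d.data.%d", blockID.getLocalID(), 0), start, length);
ChunkBuffer partialData = subject.readChunk(container, blockID, partial, ctx);
assertEquals(data.rewind().toByteString().substring(start, start + length),
    partialData.rewind().toByteString());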
use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.
the class TestFilePerBlockStrategy method testMultipleWriteSingleRead.
/**
 * Writes data in many small writes and reads it back in a single large read.
 */
@Test
public void testMultipleWriteSingleRead() throws Exception {
  final int datalen = 1024;
  final int chunkCount = 1024;
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
  ChunkManager subject = createTestSubject();
  for (int x = 0; x < chunkCount; x++) {
    // We are writing to the same chunk file, but at different offsets.
    long offset = x * datalen;
    ChunkInfo info = getChunk(blockID.getLocalID(), 0, offset, datalen);
    ChunkBuffer data = ContainerTestHelper.getData(datalen);
    oldSha.update(data.toByteString().asReadOnlyByteBuffer());
    data.rewind();
    setDataChecksum(info, data);
    subject.writeChunk(container, blockID, info, data, getDispatcherContext());
  }
  // Request to read the whole data in a single go.
  ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0,
      datalen * chunkCount);
  ChunkBuffer chunk = subject.readChunk(container, blockID, largeChunk,
      getDispatcherContext());
  ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer();
  MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
  newSha.update(newdata);
  assertEquals(Hex.encodeHexString(oldSha.digest()),
      Hex.encodeHexString(newSha.digest()));
}