Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
Class TestFilePerBlockStrategy, method testMultipleWriteSingleRead.
/**
 * This test writes data as many small writes and tries to read back the data
 * in a single large read.
 */
@Test
public void testMultipleWriteSingleRead() throws Exception {
  final int datalen = 1024;
  final int chunkCount = 1024;
  KeyValueContainer container = getKeyValueContainer();
  BlockID blockID = getBlockID();
  MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
  ChunkManager subject = createTestSubject();

  for (int x = 0; x < chunkCount; x++) {
    // We are writing to the same chunk file, but at different offsets.
    long offset = x * datalen;
    ChunkInfo info = getChunk(blockID.getLocalID(), 0, offset, datalen);
    ChunkBuffer data = ContainerTestHelper.getData(datalen);
    oldSha.update(data.toByteString().asReadOnlyByteBuffer());
    data.rewind();
    setDataChecksum(info, data);
    subject.writeChunk(container, blockID, info, data,
        getDispatcherContext());
  }

  // Request to read the whole data in a single go.
  ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0,
      datalen * chunkCount);
  ChunkBuffer chunk = subject.readChunk(container, blockID, largeChunk,
      getDispatcherContext());
  ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer();
  MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
  newSha.update(newdata);
  assertEquals(Hex.encodeHexString(oldSha.digest()),
      Hex.encodeHexString(newSha.digest()));
}
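Stripped of the container machinery, the invariant this test checks is that many positioned writes into one file yield the same bytes as a single large read. Below is a self-contained JDK sketch of the same check; the class name, temp file, and SHA-256 digest (standing in for OzoneConsts.FILE_HASH) are our own choices, not part of the Ozone source.

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.security.MessageDigest;
import java.util.Random;

public class ScatteredWriteSingleReadSketch {
  public static void main(String[] args) throws Exception {
    final int datalen = 1024;
    final int chunkCount = 1024;
    Path file = Files.createTempFile("chunk-sketch", ".dat");
    // SHA-256 is a stand-in for whatever OzoneConsts.FILE_HASH names.
    MessageDigest writeSha = MessageDigest.getInstance("SHA-256");
    Random random = new Random();

    try (FileChannel channel = FileChannel.open(file, StandardOpenOption.WRITE)) {
      for (int x = 0; x < chunkCount; x++) {
        byte[] data = new byte[datalen];
        random.nextBytes(data);
        writeSha.update(data);
        // Each write lands at a different offset in the same file, mirroring
        // how FilePerBlockStrategy places chunks within one block file.
        channel.write(ByteBuffer.wrap(data), (long) x * datalen);
      }
    }

    // Read everything back in one shot and compare digests.
    byte[] whole = Files.readAllBytes(file);
    MessageDigest readSha = MessageDigest.getInstance("SHA-256");
    readSha.update(whole);
    if (!MessageDigest.isEqual(writeSha.digest(), readSha.digest())) {
      throw new AssertionError("digest mismatch between scattered writes and single read");
    }
    System.out.println("digests match for " + chunkCount + " writes of " + datalen + " bytes");
    Files.deleteIfExists(file);
  }
}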
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
Class ContainerTestHelper, method getWriteSmallFileRequest.
/**
 * Returns a PutSmallFile request that we can send to the container.
 *
 * @param pipeline - pipeline
 * @param blockID - block ID of the small file
 * @param dataLen - number of bytes in the data
 * @return ContainerCommandRequestProto
 */
public static ContainerCommandRequestProto getWriteSmallFileRequest(
    Pipeline pipeline, BlockID blockID, int dataLen) throws Exception {
  ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest =
      ContainerProtos.PutSmallFileRequestProto.newBuilder();
  ChunkBuffer data = getData(dataLen);
  ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen);
  setDataChecksum(info, data);

  ContainerProtos.PutBlockRequestProto.Builder putRequest =
      ContainerProtos.PutBlockRequestProto.newBuilder();
  BlockData blockData = new BlockData(blockID);
  List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
  newList.add(info.getProtoBufMessage());
  blockData.setChunks(newList);
  putRequest.setBlockData(blockData.getProtoBufMessage());

  smallFileRequest.setChunkInfo(info.getProtoBufMessage());
  smallFileRequest.setData(data.toByteString());
  smallFileRequest.setBlock(putRequest);

  ContainerCommandRequestProto.Builder request =
      ContainerCommandRequestProto.newBuilder();
  request.setCmdType(ContainerProtos.Type.PutSmallFile);
  request.setContainerID(blockID.getContainerID());
  request.setPutSmallFile(smallFileRequest);
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  return request.build();
}
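A hedged sketch, not from the source, of how a test might dispatch the request built above: XceiverClientSpi.sendCommand(...) is Ozone's client entry point for container commands, while the connected client, the blockID, and this helper method are assumed to be provided by surrounding test setup.

// Assumed: 'client' is a connected XceiverClientSpi and 'blockID' names a
// block in an open container; neither is constructed in the snippet above.
static void writeSmallFile(XceiverClientSpi client, BlockID blockID)
    throws Exception {
  ContainerCommandRequestProto writeRequest =
      ContainerTestHelper.getWriteSmallFileRequest(
          client.getPipeline(), blockID, 1024);
  ContainerCommandResponseProto response = client.sendCommand(writeRequest);
  // A successful put should report SUCCESS in the response.
  if (response.getResult() != ContainerProtos.Result.SUCCESS) {
    throw new AssertionError("PutSmallFile failed: " + response.getResult());
  }
}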
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
Class TestBufferPool, method releaseAndReallocate.
@Test
public void releaseAndReallocate() {
  BufferPool pool = new BufferPool(1024, 8);
  ChunkBuffer cb1 = pool.allocateBuffer(0);
  ChunkBuffer cb2 = pool.allocateBuffer(0);
  ChunkBuffer cb3 = pool.allocateBuffer(0);

  pool.releaseBuffer(cb1);

  // Current state: cb2 and cb3 are still in use, and cb1 has been released
  // back to the pool, so the next allocation must return it rather than
  // create a fourth buffer.
  final ChunkBuffer allocated = pool.allocateBuffer(0);
  Assert.assertEquals(3, pool.getSize());
  Assert.assertEquals(cb1, allocated);
}
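The recycling behavior asserted above can be shown with a minimal stand-alone pool: releasing a buffer queues it for reuse, so the next allocation hands it back instead of growing the pool. SimpleBufferPool and its plain ByteBuffers are invented for illustration; this is not Ozone's BufferPool implementation.

import java.nio.ByteBuffer;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

public class SimpleBufferPool {
  private final int bufferSize;
  private final int capacity;
  private final List<ByteBuffer> allocated = new ArrayList<>();
  private final Deque<ByteBuffer> released = new ArrayDeque<>();

  SimpleBufferPool(int bufferSize, int capacity) {
    this.bufferSize = bufferSize;
    this.capacity = capacity;
  }

  ByteBuffer allocateBuffer() {
    // Prefer a previously released buffer; only grow the pool when none exist.
    ByteBuffer recycled = released.pollFirst();
    if (recycled != null) {
      recycled.clear();
      return recycled;
    }
    if (allocated.size() >= capacity) {
      throw new IllegalStateException("pool exhausted");
    }
    ByteBuffer fresh = ByteBuffer.allocate(bufferSize);
    allocated.add(fresh);
    return fresh;
  }

  void releaseBuffer(ByteBuffer buffer) {
    released.addLast(buffer);
  }

  int getSize() {
    return allocated.size();
  }

  public static void main(String[] args) {
    SimpleBufferPool pool = new SimpleBufferPool(1024, 8);
    ByteBuffer b1 = pool.allocateBuffer();
    pool.allocateBuffer();
    pool.allocateBuffer();
    pool.releaseBuffer(b1);
    // The next allocation returns the released buffer instead of a new one,
    // so the pool size stays at 3 -- the behavior the test above asserts.
    ByteBuffer reallocated = pool.allocateBuffer();
    System.out.println("size=" + pool.getSize() + " same=" + (reallocated == b1));
  }
}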
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
Class KeyValueHandler, method handlePutSmallFile.
/**
 * Handles the Put Small File operation: writes the chunk and the associated
 * block data in a single RPC, calling ChunkManager and BlockManager to
 * process the request.
 */
ContainerCommandResponseProto handlePutSmallFile(
    ContainerCommandRequestProto request, KeyValueContainer kvContainer,
    DispatcherContext dispatcherContext) {
  if (!request.hasPutSmallFile()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Put Small File request. trace ID: {}",
          request.getTraceID());
    }
    return malformedRequest(request);
  }

  PutSmallFileRequestProto putSmallFileReq = request.getPutSmallFile();
  final ContainerProtos.BlockData blockDataProto;
  try {
    checkContainerOpen(kvContainer);

    BlockData blockData = BlockData.getFromProtoBuf(
        putSmallFileReq.getBlock().getBlockData());
    Preconditions.checkNotNull(blockData);

    ContainerProtos.ChunkInfo chunkInfoProto = putSmallFileReq.getChunkInfo();
    ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
    Preconditions.checkNotNull(chunkInfo);

    ChunkBuffer data = ChunkBuffer.wrap(
        putSmallFileReq.getData().asReadOnlyByteBufferList());
    if (dispatcherContext == null) {
      dispatcherContext = new DispatcherContext.Builder().build();
    }

    BlockID blockID = blockData.getBlockID();

    // Chunks are committed as part of handling putSmallFile here, so there
    // is no need to maintain this info in openContainerBlockMap.
    chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data,
        dispatcherContext);
    validateChunkChecksumData(data, chunkInfo);
    chunkManager.finishWriteChunks(kvContainer, blockData);

    List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
    chunks.add(chunkInfoProto);
    blockData.setChunks(chunks);
    blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex());
    blockManager.putBlock(kvContainer, blockData);

    blockDataProto = blockData.getProtoBufMessage();
    metrics.incContainerBytesStats(Type.PutSmallFile, chunkInfo.getLen());
  } catch (StorageContainerException ex) {
    return ContainerUtils.logAndReturnError(LOG, ex, request);
  } catch (IOException ex) {
    return ContainerUtils.logAndReturnError(LOG,
        new StorageContainerException("Put Small File failed", ex,
            PUT_SMALL_FILE_ERROR), request);
  }
  return getPutFileResponseSuccess(request, blockDataProto);
}
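The core pattern here is validate-before-commit: the checksums declared in the ChunkInfo must match the received bytes before the block metadata is persisted. The following stand-alone sketch mimics that pattern with plain JDK CRC32 checksums; the class, method names, and slice size are ours, and Ozone's actual Checksum/ChecksumData classes are not used.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;

public class ChecksumValidationSketch {

  // Compute one CRC32 per bytesPerChecksum-sized slice of the data.
  static List<Long> computeCrcs(ByteBuffer data, int bytesPerChecksum) {
    List<Long> crcs = new ArrayList<>();
    ByteBuffer view = data.duplicate();
    byte[] slice = new byte[bytesPerChecksum];
    while (view.hasRemaining()) {
      int n = Math.min(bytesPerChecksum, view.remaining());
      view.get(slice, 0, n);
      CRC32 crc = new CRC32();
      crc.update(slice, 0, n);
      crcs.add(crc.getValue());
    }
    return crcs;
  }

  // Throws if the declared checksums do not match the received data,
  // mirroring how validateChunkChecksumData rejects a corrupted write
  // before the block metadata is committed.
  static void validate(ByteBuffer data, List<Long> declared, int bytesPerChecksum) {
    List<Long> actual = computeCrcs(data, bytesPerChecksum);
    if (!actual.equals(declared)) {
      throw new IllegalStateException("checksum mismatch; rejecting write");
    }
  }

  public static void main(String[] args) {
    ByteBuffer payload = ByteBuffer.wrap("small file payload".getBytes());
    List<Long> declared = computeCrcs(payload.duplicate(), 8); // sender side
    validate(payload.duplicate(), declared, 8);                // receiver side
    System.out.println("checksums verified; safe to commit chunk and block metadata");
  }
}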
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
Class KeyValueHandler, method handleWriteChunk.
/**
 * Handles the Write Chunk operation. Calls ChunkManager to process the
 * request.
 */
ContainerCommandResponseProto handleWriteChunk(
    ContainerCommandRequestProto request, KeyValueContainer kvContainer,
    DispatcherContext dispatcherContext) {
  if (!request.hasWriteChunk()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Write Chunk request. trace ID: {}",
          request.getTraceID());
    }
    return malformedRequest(request);
  }

  try {
    checkContainerOpen(kvContainer);

    WriteChunkRequestProto writeChunk = request.getWriteChunk();
    BlockID blockID = BlockID.getFromProtobuf(writeChunk.getBlockID());
    ContainerProtos.ChunkInfo chunkInfoProto = writeChunk.getChunkData();
    ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
    Preconditions.checkNotNull(chunkInfo);

    ChunkBuffer data = null;
    if (dispatcherContext == null) {
      dispatcherContext = new DispatcherContext.Builder().build();
    }
    WriteChunkStage stage = dispatcherContext.getStage();
    if (stage == WriteChunkStage.WRITE_DATA
        || stage == WriteChunkStage.COMBINED) {
      data = ChunkBuffer.wrap(writeChunk.getData().asReadOnlyByteBufferList());
      validateChunkChecksumData(data, chunkInfo);
    }
    chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data,
        dispatcherContext);

    // We should increment stats only after writeChunk succeeds.
    if (stage == WriteChunkStage.WRITE_DATA
        || stage == WriteChunkStage.COMBINED) {
      metrics.incContainerBytesStats(Type.WriteChunk,
          writeChunk.getChunkData().getLen());
    }
  } catch (StorageContainerException ex) {
    return ContainerUtils.logAndReturnError(LOG, ex, request);
  } catch (IOException ex) {
    return ContainerUtils.logAndReturnError(LOG,
        new StorageContainerException("Write Chunk failed", ex, IO_EXCEPTION),
        request);
  }
  return getSuccessResponse(request);
}
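The stage checks are the interesting part: under Ratis, a write can arrive as a data-bearing WRITE_DATA, a data-less COMMIT_DATA, or a COMBINED write-and-commit, and only the data-bearing stages unpack and checksum the payload. Below is a stand-alone sketch of that gating; the stage names mirror Ozone's WriteChunkStage values, but the surrounding class and method bodies are invented for illustration.

public class WriteChunkStageSketch {

  enum Stage { WRITE_DATA, COMMIT_DATA, COMBINED }

  static void handleWriteChunk(Stage stage, byte[] payload) {
    byte[] data = null;
    if (stage == Stage.WRITE_DATA || stage == Stage.COMBINED) {
      // Only these stages carry the actual bytes over the wire;
      // COMMIT_DATA merely commits what an earlier WRITE_DATA staged.
      data = payload;
      validateChecksum(data);
    }
    writeChunk(stage, data);
    if (stage == Stage.WRITE_DATA || stage == Stage.COMBINED) {
      // Stats are bumped only when bytes were actually written.
      System.out.println("bytes written: " + data.length);
    }
  }

  static void validateChecksum(byte[] data) {
    // Checksum verification elided in this sketch.
  }

  static void writeChunk(Stage stage, byte[] data) {
    System.out.println("writeChunk stage=" + stage + " hasData=" + (data != null));
  }

  public static void main(String[] args) {
    byte[] payload = "chunk payload".getBytes();
    handleWriteChunk(Stage.WRITE_DATA, payload);  // Ratis leader: stage the data
    handleWriteChunk(Stage.COMMIT_DATA, payload); // log apply: commit without bytes
    handleWriteChunk(Stage.COMBINED, payload);    // standalone: write and commit at once
  }
}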