use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
the class ContainerTestHelper method newWriteChunkRequestBuilder.
public static Builder newWriteChunkRequestBuilder(Pipeline pipeline,
    BlockID blockID, int datalen, int seq) throws IOException {
  LOG.trace("writeChunk {} (blockID={}) to pipeline={}", datalen, blockID, pipeline);
  ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
      ContainerProtos.WriteChunkRequestProto.newBuilder();
  writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
  ChunkBuffer data = getData(datalen);
  ChunkInfo info = getChunk(blockID.getLocalID(), seq, 0, datalen);
  setDataChecksum(info, data);
  writeRequest.setChunkData(info.getProtoBufMessage());
  writeRequest.setData(data.toByteString());
  Builder request = ContainerCommandRequestProto.newBuilder();
  request.setCmdType(ContainerProtos.Type.WriteChunk);
  request.setContainerID(blockID.getContainerID());
  request.setWriteChunk(writeRequest);
  request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
  return request;
}
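A minimal caller-side sketch (hypothetical; `pipeline` and a fresh `blockID` are assumed to come from test setup as in the other examples on this page). The method returns a builder, so callers finish it with build() before dispatching:

// Hypothetical usage sketch: pipeline and blockID come from test setup.
ContainerCommandRequestProto writeChunk =
    newWriteChunkRequestBuilder(pipeline, blockID, 1024, 0)
        .build();
// The request carries both the chunk metadata (with its checksum) and the
// payload bytes produced by ChunkBuffer.toByteString().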
use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
the class KeyValueHandler method handleReadChunk.
/**
* Handle Read Chunk operation. Calls ChunkManager to process the request.
*/
ContainerCommandResponseProto handleReadChunk(
    ContainerCommandRequestProto request, KeyValueContainer kvContainer,
    DispatcherContext dispatcherContext) {
  if (!request.hasReadChunk()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Read Chunk request. trace ID: {}", request.getTraceID());
    }
    return malformedRequest(request);
  }
  ChunkBuffer data;
  try {
    BlockID blockID = BlockID.getFromProtobuf(request.getReadChunk().getBlockID());
    ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk().getChunkData());
    Preconditions.checkNotNull(chunkInfo);
    checkContainerIsHealthy(kvContainer, blockID, Type.ReadChunk);
    BlockUtils.verifyBCSId(kvContainer, blockID);
    if (dispatcherContext == null) {
      dispatcherContext = new DispatcherContext.Builder().build();
    }
    boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk())
        .equals(ContainerProtos.ReadChunkVersion.V0);
    if (isReadChunkV0) {
      // For older clients, set ReadDataIntoSingleBuffer to true so that
      // all the data read from the chunk file is returned as a single
      // ByteString. Older clients cannot process data returned as a list
      // of ByteStrings.
      chunkInfo.setReadDataIntoSingleBuffer(true);
    }
    data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
    // Validate the checksum here only when reading from a tmp file
    // (container replication); for client reads, the client is expected
    // to validate the checksum itself.
    if (dispatcherContext.isReadFromTmpFile()) {
      validateChunkChecksumData(data, chunkInfo);
    }
    metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen());
  } catch (StorageContainerException ex) {
    return ContainerUtils.logAndReturnError(LOG, ex, request);
  } catch (IOException ex) {
    return ContainerUtils.logAndReturnError(LOG,
        new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION), request);
  }
  Preconditions.checkNotNull(data, "Chunk data is null");
  return getReadChunkResponse(request, data, byteBufferToByteString);
}
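A sketch of the request side, hedged: the field names below are inferred from the handler above (in particular, setReadChunkVersion is assumed from the getReadChunkVersion(request.getReadChunk()) call), and blockID/info are reused from the first example on this page. A V1 client can consume the data as a list of buffers, while V0 (or an absent field) triggers the single-buffer compatibility path:

// Hypothetical sketch; field names inferred from the handler above.
ContainerProtos.ReadChunkRequestProto.Builder readRequest =
    ContainerProtos.ReadChunkRequestProto.newBuilder()
        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
        .setChunkData(info.getProtoBufMessage())
        // V0 clients receive one ByteString; V1 clients accept a list.
        .setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1);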
use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
the class ChunkManager method writeChunk.
default void writeChunk(Container container, BlockID blockID, ChunkInfo info,
    ByteBuffer data, DispatcherContext dispatcherContext)
    throws StorageContainerException {
  ChunkBuffer wrapper = ChunkBuffer.wrap(data);
  writeChunk(container, blockID, info, wrapper, dispatcherContext);
}
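A minimal caller-side sketch, assuming chunkManager, container, blockID, info, and dispatcherContext are set up as in the tests elsewhere on this page: the overload accepts a plain java.nio.ByteBuffer and wraps it before delegating to the ChunkBuffer overload.

// Hypothetical sketch: the surrounding objects are assumed from test setup.
byte[] payload = new byte[1024];
ByteBuffer raw = ByteBuffer.wrap(payload); // position=0, limit=1024
// ChunkBuffer.wrap(...) creates a view over the buffer rather than copying it.
chunkManager.writeChunk(container, blockID, info, raw, dispatcherContext);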
use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
the class TestCommitWatcher method testReleaseBuffers.
@Test
public void testReleaseBuffers() throws Exception {
  int capacity = 2;
  BufferPool bufferPool = new BufferPool(chunkSize, capacity);
  XceiverClientManager clientManager = new XceiverClientManager(conf);
  ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
  Pipeline pipeline = container.getPipeline();
  long containerId = container.getContainerInfo().getContainerID();
  XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
  Assert.assertEquals(1, xceiverClient.getRefcount());
  Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
  XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
  CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
  BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
  List<XceiverClientReply> replies = new ArrayList<>();
  long length = 0;
  List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futures = new ArrayList<>();
  for (int i = 0; i < capacity; i++) {
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
    // add the data to the buffer pool
    final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
    byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
    ratisClient.sendCommandAsync(writeChunkRequest);
    ContainerProtos.ContainerCommandRequestProto putBlockRequest =
        ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
    XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
    final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
    length += byteBuffer.position();
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        reply.getResponse().thenApply(v -> {
          watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
          return v;
        });
    futures.add(future);
    watcher.getFutureMap().put(length, future);
    replies.add(reply);
  }
  Assert.assertEquals(2, replies.size());
  // wait on the 1st putBlock to complete
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 = futures.get(0);
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 = futures.get(1);
  future1.get();
  Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
  Assert.assertEquals(future1, watcher.getFutureMap().get((long) chunkSize));
  // wait on the 2nd putBlock to complete
  future2.get();
  Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
  Assert.assertEquals(future2, watcher.getFutureMap().get((long) 2 * chunkSize));
  Assert.assertEquals(2, watcher.getCommitIndex2flushedDataMap().size());
  watcher.watchOnFirstIndex();
  Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(0).getLogIndex()));
  Assert.assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
  Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
  watcher.watchOnLastIndex();
  Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(1).getLogIndex()));
  Assert.assertFalse(watcher.getFutureMap().containsKey((long) 2 * chunkSize));
  Assert.assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
  Assert.assertTrue(watcher.getFutureMap().isEmpty());
  Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
}
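In outline, the buffer lifecycle the test exercises looks like the sketch below (hedged; names are reused from the test above, and logIndex stands for the raft log index of an acknowledged putBlock):

ChunkBuffer buf = bufferPool.allocateBuffer(0);            // borrow a buffer from the pool
buf.put(writeChunkRequest.getWriteChunk().getData());      // fill it with the chunk payload
watcher.updateCommitInfoMap(logIndex, singletonList(buf)); // tie it to a raft log index
watcher.watchOnFirstIndex();                               // once that index commits, the
                                                           // watcher releases buf to the pool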
use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
the class TestContainerPersistence method testPutBlockWithLotsOfChunks.
/**
 * Tests putting a block composed of multiple chunks and reading it back.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithLotsOfChunks() throws IOException, NoSuchAlgorithmException {
  final int chunkCount = 2;
  final int datalen = 1024;
  long totalSize = 0L;
  long testContainerID = getTestContainerID();
  Container container = addContainer(containerSet, testContainerID);
  BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
  List<ChunkInfo> chunkList = new LinkedList<>();
  for (int x = 0; x < chunkCount; x++) {
    ChunkInfo info = new ChunkInfo(String.format("%d.data", blockID.getLocalID()),
        x * datalen, datalen);
    ChunkBuffer data = getData(datalen);
    setDataChecksum(info, data);
    chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
    totalSize += datalen;
    chunkList.add(info);
  }
  long bytesUsed = container.getContainerData().getBytesUsed();
  Assert.assertEquals(totalSize, bytesUsed);
  long writeBytes = container.getContainerData().getWriteBytes();
  Assert.assertEquals(chunkCount * datalen, writeBytes);
  long readCount = container.getContainerData().getReadCount();
  Assert.assertEquals(0, readCount);
  long writeCount = container.getContainerData().getWriteCount();
  Assert.assertEquals(chunkCount, writeCount);
  BlockData blockData = new BlockData(blockID);
  List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
  for (ChunkInfo i : chunkList) {
    chunkProtoList.add(i.getProtoBufMessage());
  }
  blockData.setChunks(chunkProtoList);
  blockManager.putBlock(container, blockData);
  BlockData readBlockData = blockManager.getBlock(container, blockData.getBlockID());
  ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
  ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(
      readBlockData.getChunks().get(readBlockData.getChunks().size() - 1));
  Assert.assertEquals(lastChunk.getChecksumData(), readChunk.getChecksumData());
}
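To close the write/read loop, a hedged follow-up sketch reusing names from the test above (the readChunk signature matches its use in handleReadChunk earlier on this page; ChunkBuffer.remaining() is assumed to behave like its ByteBuffer counterpart):

// Hypothetical follow-up sketch, reusing names from the test above.
ChunkBuffer readData = chunkManager.readChunk(container, blockID, lastChunk,
    getDispatcherContext());
Assert.assertEquals(datalen, readData.remaining()); // the whole chunk is returned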