Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class KeyValueHandler, method handleGetSmallFile.
/**
* Handle Get Small File operation. Gets a data stream using a key. This
* helps in reducing the RPC overhead for small files. Calls BlockManager and
* ChunkManager to process the request.
*/
ContainerCommandResponseProto handleGetSmallFile(
    ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
  if (!request.hasGetSmallFile()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Malformed Get Small File request. trace ID: {}",
          request.getTraceID());
    }
    return malformedRequest(request);
  }
  GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
  try {
    BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock().getBlockID());
    checkContainerIsHealthy(kvContainer, blockID, Type.GetSmallFile);
    BlockData responseData = blockManager.getBlock(kvContainer, blockID);
    ContainerProtos.ChunkInfo chunkInfoProto = null;
    List<ByteString> dataBuffers = new ArrayList<>();
    DispatcherContext dispatcherContext = new DispatcherContext.Builder().build();
    for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) {
      // If the block is committed, all chunks must have been committed,
      // so tmp chunk files won't exist here.
      ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk);
      boolean isReadChunkV0 = getReadChunkVersion(getSmallFileReq)
          .equals(ContainerProtos.ReadChunkVersion.V0);
      if (isReadChunkV0) {
        // For older clients, set ReadDataIntoSingleBuffer to true so that
        // all the data read from the chunk file is returned as a single
        // ByteString. Older clients cannot process data returned as a list
        // of ByteStrings.
        chunkInfo.setReadDataIntoSingleBuffer(true);
      }
      ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
      dataBuffers.addAll(data.toByteStringList(byteBufferToByteString));
      chunkInfoProto = chunk;
    }
    metrics.incContainerBytesStats(Type.GetSmallFile, BufferUtils.getBuffersLen(dataBuffers));
    return getGetSmallFileResponseSuccess(request, dataBuffers, chunkInfoProto);
  } catch (StorageContainerException e) {
    return ContainerUtils.logAndReturnError(LOG, e, request);
  } catch (IOException ex) {
    return ContainerUtils.logAndReturnError(LOG,
        new StorageContainerException("Get Small File failed", ex, GET_SMALL_FILE_ERROR), request);
  }
}
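For context, handleGetSmallFile returns the block payload as a list of ByteStrings, one or more per chunk. Below is a minimal hedged sketch of how a caller might reassemble that list into one contiguous ByteString; the helper name mergeBuffers is hypothetical and not part of the Ozone API, and the Ratis-shaded ByteString import is an assumption based on how Ozone datanode code is typically shaded.

import java.util.List;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

// Hypothetical helper: concatenate the per-chunk ByteStrings returned by
// handleGetSmallFile. ByteString.concat builds a rope internally, so the
// chunk data is not eagerly copied here.
static ByteString mergeBuffers(List<ByteString> dataBuffers) {
  ByteString merged = ByteString.EMPTY;
  for (ByteString buffer : dataBuffers) {
    merged = merged.concat(buffer);
  }
  return merged;
}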
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class ChunkManagerDispatcher, method readChunk.
@Override
public ChunkBuffer readChunk(Container container, BlockID blockID,
    ChunkInfo info, DispatcherContext dispatcherContext)
    throws StorageContainerException {
  ChunkBuffer data = selectHandler(container)
      .readChunk(container, blockID, info, dispatcherContext);
  Preconditions.checkState(data != null);
  container.getContainerData().updateReadStats(data.remaining());
  return data;
}
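A small standalone sketch of the ChunkBuffer contract this dispatcher relies on: remaining() reports how many readable bytes the buffer holds, which is exactly what updateReadStats is charged with above. Only the wrap and remaining methods already shown on this page are assumed.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.ozone.common.ChunkBuffer;

public class ChunkBufferRemainingExample {
  public static void main(String[] args) {
    // Wrap an existing ByteBuffer; ChunkBuffer.wrap does not copy the data.
    byte[] payload = "hello ozone".getBytes(StandardCharsets.UTF_8);
    ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(payload));
    // remaining() is the value readChunk feeds into updateReadStats:
    // the number of readable bytes handed back to the caller.
    System.out.println(data.remaining() == payload.length); // prints true
  }
}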
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestBlockDeletingService, method createToDeleteBlocks.
/**
* A helper method to create some blocks and put them under deletion
* state for testing. This method directly updates container.db and
* creates some fake chunk files for testing.
*/
private void createToDeleteBlocks(ContainerSet containerSet, int numOfContainers,
    int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException {
  ChunkManager chunkManager;
  if (layout == FILE_PER_BLOCK) {
    chunkManager = new FilePerBlockStrategy(true, null, null);
  } else {
    chunkManager = new FilePerChunkStrategy(true, null, null);
  }
  // One shared 1 MiB (1048576-byte) buffer of random data, reused for
  // every chunk write in the test.
  byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8);
  ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr));
  int txnID = 0;
  for (int x = 0; x < numOfContainers; x++) {
    long containerID = ContainerTestHelper.getTestContainerID();
    KeyValueContainerData data = new KeyValueContainerData(containerID, layout,
        ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), datanodeUuid);
    data.closeContainer();
    data.setSchemaVersion(schemaVersion);
    KeyValueContainer container = new KeyValueContainer(data, conf);
    container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId);
    containerSet.addContainer(container);
    data = (KeyValueContainerData) containerSet.getContainer(containerID).getContainerData();
    data.setSchemaVersion(schemaVersion);
    if (schemaVersion.equals(SCHEMA_V1)) {
      createPendingDeleteBlocksSchema1(numOfBlocksPerContainer, data, containerID,
          numOfChunksPerBlock, buffer, chunkManager, container);
    } else if (schemaVersion.equals(SCHEMA_V2)) {
      createPendingDeleteBlocksSchema2(numOfBlocksPerContainer, txnID, containerID,
          numOfChunksPerBlock, buffer, chunkManager, container, data);
    } else {
      throw new UnsupportedOperationException(
          "Only schema version 1 and schema version 2 are supported.");
    }
  }
}
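The layout switch at the top of the helper picks between Ozone's two chunk layout strategies. A hedged sketch isolating that choice follows; the import paths are assumptions based on the Ozone source layout, and the null constructor arguments mirror the test, which skips dependencies it does not need.

import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;

// FILE_PER_BLOCK keeps a single file per block and appends chunks to it;
// FILE_PER_CHUNK writes each chunk to its own file. Both implement the
// same ChunkManager interface, so the rest of the test is layout-agnostic.
ChunkManager chunkManager = (layout == FILE_PER_BLOCK)
    ? new FilePerBlockStrategy(true, null, null)
    : new FilePerChunkStrategy(true, null, null);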
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestContainerUtils, method redactsDataBuffers.
@Test
public void redactsDataBuffers() {
  // GIVEN
  ContainerCommandRequestProto req = getDummyCommandRequestProto(ReadChunk);
  ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap("junk".getBytes(UTF_8)));
  ContainerCommandResponseProto resp = getReadChunkResponse(req, data, ByteStringConversion::safeWrap);

  // WHEN
  ContainerCommandResponseProto processed = processForDebug(resp);

  // THEN
  ContainerProtos.DataBuffers dataBuffers = processed.getReadChunk().getDataBuffers();
  assertEquals(1, dataBuffers.getBuffersCount());
  assertEquals("<redacted>", dataBuffers.getBuffers(0).toString(UTF_8));
}
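The invariant this test pins down: anything logged for debugging must not carry raw chunk data. A minimal hedged sketch of the intended call pattern follows; it assumes processForDebug is the static helper exercised above (the TestContainerUtils name suggests it lives in ContainerUtils) and that it returns a copy whose data buffers read "<redacted>".

import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import static org.apache.hadoop.ozone.container.common.helpers.ContainerUtils.processForDebug;

public final class ResponseLogging {
  private static final Logger LOG = LoggerFactory.getLogger(ResponseLogging.class);

  // Run the response through processForDebug before logging, so chunk
  // payloads are replaced with "<redacted>" instead of leaking user data.
  static void debugLog(ContainerCommandResponseProto resp) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("datanode response: {}", processForDebug(resp));
    }
  }
}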
Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.
The class TestCommitWatcher, method testReleaseBuffersOnException.
@Test
public void testReleaseBuffersOnException() throws Exception {
  int capacity = 2;
  BufferPool bufferPool = new BufferPool(chunkSize, capacity);
  XceiverClientManager clientManager = new XceiverClientManager(conf);
  ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
      HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
  Pipeline pipeline = container.getPipeline();
  long containerId = container.getContainerInfo().getContainerID();
  XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
  Assert.assertEquals(1, xceiverClient.getRefcount());
  Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
  XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
  CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
  BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
  List<XceiverClientReply> replies = new ArrayList<>();
  long length = 0;
  List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futures = new ArrayList<>();
  for (int i = 0; i < capacity; i++) {
    ContainerProtos.ContainerCommandRequestProto writeChunkRequest =
        ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
    // Add the data to the buffer pool.
    final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
    byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
    ratisClient.sendCommandAsync(writeChunkRequest);
    ContainerProtos.ContainerCommandRequestProto putBlockRequest =
        ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
    XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
    final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
    length += byteBuffer.position();
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
        reply.getResponse().thenApply(v -> {
          watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
          return v;
        });
    futures.add(future);
    watcher.getFutureMap().put(length, future);
    replies.add(reply);
  }
  Assert.assertEquals(2, replies.size());
  // Wait on the 1st putBlock to complete.
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 = futures.get(0);
  CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 = futures.get(1);
  future1.get();
  Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
  Assert.assertEquals(future1, watcher.getFutureMap().get((long) chunkSize));
  // Wait on the 2nd putBlock to complete.
  future2.get();
  Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
  Assert.assertEquals(future2, watcher.getFutureMap().get((long) 2 * chunkSize));
  Assert.assertEquals(2, watcher.getCommitIndex2flushedDataMap().size());
  watcher.watchOnFirstIndex();
  Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(0).getLogIndex()));
  // The future map is keyed by Long, so the lookup key must be widened to
  // long; an int key would autobox to Integer and never match.
  Assert.assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
  Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
  cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
  cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
  try {
    // Watch for a higher index to force an actual call to Ratis; otherwise
    // the watch may return immediately if the commitInfoMap was already
    // updated to the latest index by the putBlock response.
    watcher.watchForCommit(replies.get(1).getLogIndex() + 100);
    Assert.fail("Expected exception not thrown");
  } catch (IOException ioe) {
    // With the retry count set to noRetry and a low watch request timeout,
    // the watch request eventually fails with TimeoutIOException from the
    // Ratis client, or the client itself gets AlreadyClosedException from
    // the Ratis server, and the write may fail with RaftRetryFailureException.
    Throwable t = HddsClientUtils.checkForException(ioe);
    Assert.assertTrue("Unexpected exception: " + t.getClass(),
        t instanceof RaftRetryFailureException || t instanceof TimeoutIOException
            || t instanceof AlreadyClosedException || t instanceof NotReplicatedException);
  }
  if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1).getLogIndex()) {
    Assert.assertEquals(chunkSize, watcher.getTotalAckDataLength());
    Assert.assertEquals(1, watcher.getCommitIndex2flushedDataMap().size());
    Assert.assertEquals(1, watcher.getFutureMap().size());
  } else {
    Assert.assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
    Assert.assertTrue(watcher.getFutureMap().isEmpty());
    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
  }
}
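The invariant behind this test: every ChunkBuffer taken from the BufferPool must eventually be returned, even when the commit watch fails. Below is a hedged sketch of that acquire/release pairing; releaseBuffer is assumed from BufferPool's API, since the snippet above only shows allocateBuffer.

import org.apache.hadoop.hdds.scm.storage.BufferPool;
import org.apache.hadoop.ozone.common.ChunkBuffer;

public class BufferPoolPairingExample {
  static void writeWithPool(BufferPool pool) {
    // allocateBuffer(0) hands out the next free buffer from the pool.
    ChunkBuffer buf = pool.allocateBuffer(0);
    try {
      // ... fill buf and hand it to the write path ...
    } finally {
      // Return the buffer so the pool's capacity (2 in the test above)
      // is not exhausted when a watch or write fails.
      pool.releaseBuffer(buf);
    }
  }
}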