
Example 21 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class KeyValueHandler method handleGetSmallFile.

/**
 * Handle Get Small File operation. Gets a data stream using a key. This
 * helps in reducing the RPC overhead for small files. Calls BlockManager and
 * ChunkManager to process the request.
 */
ContainerCommandResponseProto handleGetSmallFile(ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
    if (!request.hasGetSmallFile()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Get Small File request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
    try {
        BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock().getBlockID());
        checkContainerIsHealthy(kvContainer, blockID, Type.GetSmallFile);
        BlockData responseData = blockManager.getBlock(kvContainer, blockID);
        ContainerProtos.ChunkInfo chunkInfoProto = null;
        List<ByteString> dataBuffers = new ArrayList<>();
        DispatcherContext dispatcherContext = new DispatcherContext.Builder().build();
        // The client's read-chunk version is fixed for the whole request, so
        // resolve it once rather than on every chunk.
        boolean isReadChunkV0 = getReadChunkVersion(getSmallFileReq).equals(ContainerProtos.ReadChunkVersion.V0);
        for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) {
            // if the block is committed, all chunks must have been committed.
            // Tmp chunk files won't exist here.
            ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk);
            if (isReadChunkV0) {
                // For older clients, set ReadDataIntoSingleBuffer to true so that
                // all the data read from chunk file is returned as a single
                // ByteString. Older clients cannot process data returned as a list
                // of ByteStrings.
                chunkInfo.setReadDataIntoSingleBuffer(true);
            }
            ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
            dataBuffers.addAll(data.toByteStringList(byteBufferToByteString));
            chunkInfoProto = chunk;
        }
        metrics.incContainerBytesStats(Type.GetSmallFile, BufferUtils.getBuffersLen(dataBuffers));
        return getGetSmallFileResponseSuccess(request, dataBuffers, chunkInfoProto);
    } catch (StorageContainerException e) {
        return ContainerUtils.logAndReturnError(LOG, e, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Get Small File failed", ex, GET_SMALL_FILE_ERROR), request);
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) GetSmallFileRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetSmallFileRequestProto) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) DispatcherContext(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) IOException(java.io.IOException) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData)
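
The ReadChunkVersion branch above is the key detail: V0 clients can only consume the chunk data as one contiguous ByteString, while V1 clients accept a list of buffers and avoid the extra copy. Below is a minimal standalone sketch of the V1-style conversion, reusing the ByteStringConversion::safeWrap converter seen in Example 24; the class name and payload are hypothetical.

import java.nio.ByteBuffer;
import java.util.List;

import org.apache.hadoop.hdds.scm.ByteStringConversion;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

import static java.nio.charset.StandardCharsets.UTF_8;

public class ReadChunkVersionSketch {
    public static void main(String[] args) {
        // Wrap an existing byte[] without copying it.
        ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap("small file payload".getBytes(UTF_8)));
        // V1 path: the chunk data may be returned as several ByteStrings;
        // each underlying ByteBuffer is wrapped without an extra copy.
        List<ByteString> buffers = data.toByteStringList(ByteStringConversion::safeWrap);
        // V0 clients instead need one contiguous ByteString, which is why
        // handleGetSmallFile sets readDataIntoSingleBuffer for them.
        System.out.println("buffers returned: " + buffers.size());
    }
}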

Example 22 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class ChunkManagerDispatcher method readChunk.

@Override
public ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException {
    ChunkBuffer data = selectHandler(container).readChunk(container, blockID, info, dispatcherContext);
    Preconditions.checkState(data != null);
    container.getContainerData().updateReadStats(data.remaining());
    return data;
}
Also used : ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer)
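
The dispatcher delegates the read to a layout-specific handler, then charges the bytes to the container's read statistics. A minimal sketch of that guard-then-account pattern follows; ReadStats is a hypothetical stand-in for ContainerData.updateReadStats.

import java.nio.ByteBuffer;

import org.apache.hadoop.ozone.common.ChunkBuffer;

public class ReadStatsSketch {
    // Hypothetical stand-in for ContainerData.updateReadStats(long).
    interface ReadStats {
        void addBytesRead(long n);
    }

    static ChunkBuffer readAndAccount(ChunkBuffer fromHandler, ReadStats stats) {
        if (fromHandler == null) {
            // Mirrors Preconditions.checkState(data != null) above: a null
            // result from the handler is a programming error, not an I/O error.
            throw new IllegalStateException("handler returned no data");
        }
        stats.addBytesRead(fromHandler.remaining());
        return fromHandler;
    }

    public static void main(String[] args) {
        ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap(new byte[16]));
        readAndAccount(data, n -> System.out.println("read " + n + " bytes"));
    }
}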

Example 23 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class TestBlockDeletingService method createToDeleteBlocks.

/**
 * A helper method to create some blocks and put them under deletion
 * state for testing. This method directly updates container.db and
 * creates some fake chunk files for testing.
 */
private void createToDeleteBlocks(ContainerSet containerSet, int numOfContainers, int numOfBlocksPerContainer, int numOfChunksPerBlock) throws IOException {
    ChunkManager chunkManager;
    if (layout == FILE_PER_BLOCK) {
        chunkManager = new FilePerBlockStrategy(true, null, null);
    } else {
        chunkManager = new FilePerChunkStrategy(true, null, null);
    }
    byte[] arr = randomAlphanumeric(1048576).getBytes(UTF_8);
    ChunkBuffer buffer = ChunkBuffer.wrap(ByteBuffer.wrap(arr));
    int txnID = 0;
    for (int x = 0; x < numOfContainers; x++) {
        long containerID = ContainerTestHelper.getTestContainerID();
        KeyValueContainerData data = new KeyValueContainerData(containerID, layout, ContainerTestHelper.CONTAINER_MAX_SIZE, UUID.randomUUID().toString(), datanodeUuid);
        data.closeContainer();
        data.setSchemaVersion(schemaVersion);
        KeyValueContainer container = new KeyValueContainer(data, conf);
        container.create(volumeSet, new RoundRobinVolumeChoosingPolicy(), scmId);
        containerSet.addContainer(container);
        data = (KeyValueContainerData) containerSet.getContainer(containerID).getContainerData();
        data.setSchemaVersion(schemaVersion);
        if (schemaVersion.equals(SCHEMA_V1)) {
            createPendingDeleteBlocksSchema1(numOfBlocksPerContainer, data, containerID, numOfChunksPerBlock, buffer, chunkManager, container);
        } else if (schemaVersion.equals(SCHEMA_V2)) {
            createPendingDeleteBlocksSchema2(numOfBlocksPerContainer, txnID, containerID, numOfChunksPerBlock, buffer, chunkManager, container, data);
        } else {
            throw new UnsupportedOperationException("Only schema version 1 and schema version 2 are " + "supported.");
        }
    }
}
Also used : RoundRobinVolumeChoosingPolicy(org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy) FilePerChunkStrategy(org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy) FilePerBlockStrategy(org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) ChunkManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
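
The layout switch at the top of the helper selects how chunks are persisted: FILE_PER_BLOCK keeps every chunk of a block in a single file, FILE_PER_CHUNK gives each chunk its own file. A minimal factory sketch of that selection follows; the Layout enum is a hypothetical stand-in for the test's layout field, and the constructor arguments (sync=true, no BlockManager, no VolumeSet) mirror the test.

import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerBlockStrategy;
import org.apache.hadoop.ozone.container.keyvalue.impl.FilePerChunkStrategy;
import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;

public class ChunkManagerFactorySketch {
    // Hypothetical stand-in for the container layout version used by the test.
    enum Layout { FILE_PER_BLOCK, FILE_PER_CHUNK }

    static ChunkManager forLayout(Layout layout) {
        // FILE_PER_BLOCK: one file per block, chunks written at their offsets.
        // FILE_PER_CHUNK: one file per chunk, so deletes can remove files directly.
        return layout == Layout.FILE_PER_BLOCK
            ? new FilePerBlockStrategy(true, null, null)
            : new FilePerChunkStrategy(true, null, null);
    }
}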

Example 24 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class TestContainerUtils method redactsDataBuffers.

@Test
public void redactsDataBuffers() {
    // GIVEN
    ContainerCommandRequestProto req = getDummyCommandRequestProto(ReadChunk);
    ChunkBuffer data = ChunkBuffer.wrap(ByteBuffer.wrap("junk".getBytes(UTF_8)));
    ContainerCommandResponseProto resp = getReadChunkResponse(req, data, ByteStringConversion::safeWrap);
    // WHEN
    ContainerCommandResponseProto processed = processForDebug(resp);
    // THEN
    ContainerProtos.DataBuffers dataBuffers = processed.getReadChunk().getDataBuffers();
    assertEquals(1, dataBuffers.getBuffersCount());
    assertEquals("<redacted>", dataBuffers.getBuffers(0).toString(UTF_8));
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ByteStringConversion(org.apache.hadoop.hdds.scm.ByteStringConversion) ContainerCommandRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) ContainerCommandResponseProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto) Test(org.junit.Test)
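
What processForDebug guarantees is that raw user bytes never reach debug logs: the response keeps its structure, but each data buffer is replaced with a placeholder. A generic sketch of that redaction idea follows; all names are hypothetical, and this is not the actual processForDebug implementation.

import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

import static java.nio.charset.StandardCharsets.UTF_8;

public class RedactionSketch {
    // Replace payload bytes with a fixed marker, keeping the message structure.
    static ByteString redact(ByteString payload) {
        return ByteString.copyFrom("<redacted>", UTF_8);
    }

    public static void main(String[] args) {
        ByteString data = ByteString.copyFrom("junk", UTF_8);
        System.out.println(redact(data).toString(UTF_8)); // prints <redacted>
    }
}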

Example 25 with ChunkBuffer

use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

the class TestCommitWatcher method testReleaseBuffersOnException.

@Test
public void testReleaseBuffersOnException() throws Exception {
    int capacity = 2;
    BufferPool bufferPool = new BufferPool(chunkSize, capacity);
    XceiverClientManager clientManager = new XceiverClientManager(conf);
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
    Pipeline pipeline = container.getPipeline();
    long containerId = container.getContainerInfo().getContainerID();
    XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
    Assert.assertEquals(1, xceiverClient.getRefcount());
    Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
    CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
    List<XceiverClientReply> replies = new ArrayList<>();
    long length = 0;
    List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futures = new ArrayList<>();
    for (int i = 0; i < capacity; i++) {
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
        // add the data to the buffer pool
        final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
        byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
        ratisClient.sendCommandAsync(writeChunkRequest);
        ContainerProtos.ContainerCommandRequestProto putBlockRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
        XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
        final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
        length += byteBuffer.position();
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = reply.getResponse().thenApply(v -> {
            watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
            return v;
        });
        futures.add(future);
        watcher.getFutureMap().put(length, future);
        replies.add(reply);
    }
    Assert.assertEquals(2, replies.size());
    // wait on the 1st putBlock to complete
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 = futures.get(0);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 = futures.get(1);
    future1.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
    Assert.assertEquals(future1, watcher.getFutureMap().get((long) chunkSize));
    // wait on 2nd putBlock to complete
    future2.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
    Assert.assertEquals(future2, watcher.getFutureMap().get((long) 2 * chunkSize));
    Assert.assertEquals(2, watcher.getCommitIndex2flushedDataMap().size());
    watcher.watchOnFirstIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(0).getLogIndex()));
    // The future map keys are Long; an unboxed int would autobox to Integer
    // and never match, making the assertion pass trivially.
    Assert.assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
    Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
    cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
    cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
    try {
        // Watch for a higher index to force an actual call to Ratis;
        // otherwise the watch may return immediately if the commitInfoMap
        // was already updated to the latest index by the putBlock response.
        watcher.watchForCommit(replies.get(1).getLogIndex() + 100);
        Assert.fail("Expected exception not thrown");
    } catch (IOException ioe) {
        // With the retry policy set to noRetry and a low watch-request
        // timeout, the watch request eventually fails with a
        // TimeoutIOException from the Ratis client, or the client itself
        // gets an AlreadyClosedException from the Ratis server, and the
        // write may fail with a RaftRetryFailureException.
        Throwable t = HddsClientUtils.checkForException(ioe);
        Assert.assertTrue("Unexpected exception: " + t.getClass(), t instanceof RaftRetryFailureException || t instanceof TimeoutIOException || t instanceof AlreadyClosedException || t instanceof NotReplicatedException);
    }
    if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1).getLogIndex()) {
        Assert.assertEquals(chunkSize, watcher.getTotalAckDataLength());
        Assert.assertEquals(1, watcher.getCommitIndex2flushedDataMap().size());
        Assert.assertEquals(1, watcher.getFutureMap().size());
    } else {
        Assert.assertEquals(2 * chunkSize, watcher.getTotalAckDataLength());
        Assert.assertTrue(watcher.getFutureMap().isEmpty());
        Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
    }
}
Also used : CommitWatcher(org.apache.hadoop.hdds.scm.storage.CommitWatcher) ArrayList(java.util.ArrayList) AlreadyClosedException(org.apache.ratis.protocol.exceptions.AlreadyClosedException) TimeoutIOException(org.apache.ratis.protocol.exceptions.TimeoutIOException) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) XceiverClientReply(org.apache.hadoop.hdds.scm.XceiverClientReply) RaftRetryFailureException(org.apache.ratis.protocol.exceptions.RaftRetryFailureException) CompletableFuture(java.util.concurrent.CompletableFuture) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) XceiverClientRatis(org.apache.hadoop.hdds.scm.XceiverClientRatis) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) TimeoutIOException(org.apache.ratis.protocol.exceptions.TimeoutIOException) IOException(java.io.IOException) XceiverClientSpi(org.apache.hadoop.hdds.scm.XceiverClientSpi) ContainerWithPipeline(org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) BufferPool(org.apache.hadoop.hdds.scm.storage.BufferPool) NotReplicatedException(org.apache.ratis.protocol.exceptions.NotReplicatedException) BlockID(org.apache.hadoop.hdds.client.BlockID) Test(org.junit.Test)
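
The contract this test exercises is buffer accounting: every ChunkBuffer taken from the BufferPool must go back once its commit index is acknowledged, including on the exception path. A minimal sketch of that lifecycle follows; releaseBuffer is my assumption for the release-side call, while the constructor and allocateBuffer mirror the test.

import org.apache.hadoop.hdds.scm.storage.BufferPool;
import org.apache.hadoop.ozone.common.ChunkBuffer;

public class BufferPoolSketch {
    public static void main(String[] args) {
        int chunkSize = 4 * 1024 * 1024; // hypothetical chunk size
        BufferPool pool = new BufferPool(chunkSize, 2);
        // A writer takes one buffer per in-flight chunk...
        ChunkBuffer buffer = pool.allocateBuffer(0);
        // ...fills it, ships it, and once the CommitWatcher sees the matching
        // commit index acknowledged (or cleans up on failure), the buffer
        // is returned to the pool.
        pool.releaseBuffer(buffer); // assumption: release-side API
    }
}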

Aggregations

ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 30 usages
BlockID (org.apache.hadoop.hdds.client.BlockID): 14 usages
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 14 usages
Test (org.junit.Test): 13 usages
IOException (java.io.IOException): 10 usages
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 9 usages
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8 usages
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 8 usages
ArrayList (java.util.ArrayList): 6 usages
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 5 usages
CompletableFuture (java.util.concurrent.CompletableFuture): 4 usages
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 4 usages
XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply): 4 usages
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 4 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 4 usages
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 4 usages
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 4 usages
ByteBuffer (java.nio.ByteBuffer): 3 usages
Path (java.nio.file.Path): 3 usages
LinkedList (java.util.LinkedList): 3 usages