Example 1 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in the Apache Ozone project.

From the class ContainerTestHelper, method newWriteChunkRequestBuilder:

public static Builder newWriteChunkRequestBuilder(Pipeline pipeline,
        BlockID blockID, int datalen, int seq) throws IOException {
    LOG.trace("writeChunk {} (blockID={}) to pipeline={}", datalen, blockID, pipeline);
    // Build the WriteChunk sub-message: target block, chunk metadata, payload.
    ContainerProtos.WriteChunkRequestProto.Builder writeRequest =
        ContainerProtos.WriteChunkRequestProto.newBuilder();
    writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
    // Generate test data and record its checksum in the chunk metadata.
    ChunkBuffer data = getData(datalen);
    ChunkInfo info = getChunk(blockID.getLocalID(), seq, 0, datalen);
    setDataChecksum(info, data);
    writeRequest.setChunkData(info.getProtoBufMessage());
    writeRequest.setData(data.toByteString());
    // Wrap it in a container command addressed to the pipeline's first node.
    Builder request = ContainerCommandRequestProto.newBuilder();
    request.setCmdType(ContainerProtos.Type.WriteChunk);
    request.setContainerID(blockID.getContainerID());
    request.setWriteChunk(writeRequest);
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    return request;
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), Builder (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder), ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer)
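
A hedged usage sketch (not part of the indexed source): the builder above can be finished with build() and dispatched the way Example 4 below sends its requests. The wrapper name sendTestWriteChunk and the datalen/seq literals are illustrative assumptions; getTestBlockID, newWriteChunkRequestBuilder, and sendCommandAsync all appear elsewhere on this page.

// Sketch only: 'ratisClient' and 'pipeline' come from test setup as in
// Example 4; datalen=1024 and seq=0 are arbitrary illustrative values.
static XceiverClientReply sendTestWriteChunk(XceiverClientRatis ratisClient,
        Pipeline pipeline, long containerId) throws Exception {
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
    ContainerProtos.ContainerCommandRequestProto request =
        ContainerTestHelper.newWriteChunkRequestBuilder(pipeline, blockID, 1024, 0)
            .build();
    // Asynchronous dispatch over the Ratis pipeline, as in Example 4.
    return ratisClient.sendCommandAsync(request);
}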

Example 2 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in the Apache Ozone project.

From the class KeyValueHandler, method handleReadChunk:

/**
 * Handle Read Chunk operation. Calls ChunkManager to process the request.
 */
ContainerCommandResponseProto handleReadChunk(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) {
    if (!request.hasReadChunk()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Read Chunk request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    ChunkBuffer data;
    try {
        BlockID blockID = BlockID.getFromProtobuf(request.getReadChunk().getBlockID());
        ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk().getChunkData());
        Preconditions.checkNotNull(chunkInfo);
        checkContainerIsHealthy(kvContainer, blockID, Type.ReadChunk);
        BlockUtils.verifyBCSId(kvContainer, blockID);
        if (dispatcherContext == null) {
            dispatcherContext = new DispatcherContext.Builder().build();
        }
        boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()).equals(ContainerProtos.ReadChunkVersion.V0);
        if (isReadChunkV0) {
            // For older clients, set ReadDataIntoSingleBuffer to true so that
            // all the data read from chunk file is returned as a single
            // ByteString. Older clients cannot process data returned as a list
            // of ByteStrings.
            chunkInfo.setReadDataIntoSingleBuffer(true);
        }
        data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
        // Reads served from a tmp file (e.g. during container replication)
        // are validated here; for normal client reads, the client is
        // expected to validate the checksum itself.
        if (dispatcherContext.isReadFromTmpFile()) {
            validateChunkChecksumData(data, chunkInfo);
        }
        metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen());
    } catch (StorageContainerException ex) {
        return ContainerUtils.logAndReturnError(LOG, ex, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION), request);
    }
    Preconditions.checkNotNull(data, "Chunk data is null");
    return getReadChunkResponse(request, data, byteBufferToByteString);
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer), BlockID (org.apache.hadoop.hdds.client.BlockID), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), IOException (java.io.IOException)
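
The V0/V1 branching above hinges on getReadChunkVersion, which is not shown on this page. A plausible sketch of that helper, under the assumption that ReadChunkRequestProto exposes an optional readChunkVersion field that older clients simply omit:

// Sketch under the stated assumption: a request without an explicit version
// field is treated as V0, i.e. as coming from an older client that needs the
// chunk data returned as a single ByteString.
static ContainerProtos.ReadChunkVersion getReadChunkVersion(
        ContainerProtos.ReadChunkRequestProto readChunkRequest) {
    if (readChunkRequest.hasReadChunkVersion()) {
        return readChunkRequest.getReadChunkVersion();
    }
    return ContainerProtos.ReadChunkVersion.V0;
}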

Example 3 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in the Apache Ozone project.

From the interface ChunkManager, default method writeChunk:

default void writeChunk(Container container, BlockID blockID, ChunkInfo info,
        ByteBuffer data, DispatcherContext dispatcherContext)
        throws StorageContainerException {
    // Convenience overload: wrap the raw NIO buffer in a ChunkBuffer and
    // delegate to the ChunkBuffer-based writeChunk.
    ChunkBuffer wrapper = ChunkBuffer.wrap(data);
    writeChunk(container, blockID, info, wrapper, dispatcherContext);
}
Also used: ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer)
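
A caller-side sketch of the overload above; the payload array and the surrounding variables (chunkManager, container, blockID, info, dispatcherContext) stand in for a caller's context, while the ChunkBuffer.wrap call is taken directly from the default method:

// Sketch: any raw NIO buffer can be handed to writeChunk; the default method
// above wraps it before delegating to the ChunkBuffer-based overload.
byte[] payload = new byte[4096]; // assumed test payload
ByteBuffer nioBuffer = ByteBuffer.wrap(payload);
chunkManager.writeChunk(container, blockID, info, nioBuffer, dispatcherContext);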

Example 4 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in the Apache Ozone project.

From the class TestCommitWatcher, method testReleaseBuffers:

@Test
public void testReleaseBuffers() throws Exception {
    int capacity = 2;
    BufferPool bufferPool = new BufferPool(chunkSize, capacity);
    XceiverClientManager clientManager = new XceiverClientManager(conf);
    ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, OzoneConsts.OZONE);
    Pipeline pipeline = container.getPipeline();
    long containerId = container.getContainerInfo().getContainerID();
    XceiverClientSpi xceiverClient = clientManager.acquireClient(pipeline);
    Assert.assertEquals(1, xceiverClient.getRefcount());
    Assert.assertTrue(xceiverClient instanceof XceiverClientRatis);
    XceiverClientRatis ratisClient = (XceiverClientRatis) xceiverClient;
    CommitWatcher watcher = new CommitWatcher(bufferPool, ratisClient);
    BlockID blockID = ContainerTestHelper.getTestBlockID(containerId);
    List<XceiverClientReply> replies = new ArrayList<>();
    long length = 0;
    List<CompletableFuture<ContainerProtos.ContainerCommandResponseProto>> futures = new ArrayList<>();
    for (int i = 0; i < capacity; i++) {
        ContainerProtos.ContainerCommandRequestProto writeChunkRequest = ContainerTestHelper.getWriteChunkRequest(pipeline, blockID, chunkSize, null);
        // add the data to the buffer pool
        final ChunkBuffer byteBuffer = bufferPool.allocateBuffer(0);
        byteBuffer.put(writeChunkRequest.getWriteChunk().getData());
        ratisClient.sendCommandAsync(writeChunkRequest);
        ContainerProtos.ContainerCommandRequestProto putBlockRequest = ContainerTestHelper.getPutBlockRequest(pipeline, writeChunkRequest.getWriteChunk());
        XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
        final List<ChunkBuffer> bufferList = singletonList(byteBuffer);
        length += byteBuffer.position();
        CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future = reply.getResponse().thenApply(v -> {
            watcher.updateCommitInfoMap(reply.getLogIndex(), bufferList);
            return v;
        });
        futures.add(future);
        watcher.getFutureMap().put(length, future);
        replies.add(reply);
    }
    Assert.assertEquals(2, replies.size());
    // wait on the 1st putBlock to complete
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future1 = futures.get(0);
    CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future2 = futures.get(1);
    future1.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) chunkSize));
    Assert.assertEquals(future1, watcher.getFutureMap().get((long) chunkSize));
    // wait on 2nd putBlock to complete
    future2.get();
    Assert.assertNotNull(watcher.getFutureMap().get((long) 2 * chunkSize));
    Assert.assertEquals(future2, watcher.getFutureMap().get((long) 2 * chunkSize));
    Assert.assertEquals(2, watcher.getCommitIndex2flushedDataMap().size());
    watcher.watchOnFirstIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(0).getLogIndex()));
    // Cast to long: the future map is keyed by Long, so an int key would
    // never match and the assertion would pass vacuously.
    Assert.assertFalse(watcher.getFutureMap().containsKey((long) chunkSize));
    Assert.assertTrue(watcher.getTotalAckDataLength() >= chunkSize);
    watcher.watchOnLastIndex();
    Assert.assertFalse(watcher.getCommitIndex2flushedDataMap().containsKey(replies.get(1).getLogIndex()));
    Assert.assertFalse(watcher.getFutureMap().containsKey((long) 2 * chunkSize));
    Assert.assertEquals(2L * chunkSize, watcher.getTotalAckDataLength());
    Assert.assertTrue(watcher.getFutureMap().isEmpty());
    Assert.assertTrue(watcher.getCommitIndex2flushedDataMap().isEmpty());
}
Also used: CommitWatcher (org.apache.hadoop.hdds.scm.storage.CommitWatcher), ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ArrayList (java.util.ArrayList), XceiverClientRatis (org.apache.hadoop.hdds.scm.XceiverClientRatis), XceiverClientManager (org.apache.hadoop.hdds.scm.XceiverClientManager), XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi), ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline), Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline), XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply), CompletableFuture (java.util.concurrent.CompletableFuture), BufferPool (org.apache.hadoop.hdds.scm.storage.BufferPool), BlockID (org.apache.hadoop.hdds.client.BlockID), ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer), Test (org.junit.Test)
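
Condensed, the commit-watch pattern the test exercises looks like this; the variables are the ones set up in the test body, and only the ordering is rearranged for emphasis:

// Sketch of the core pattern: tie buffers to the Ratis log index when the
// putBlock reply resolves, then watch an index to release everything
// committed at or below it.
XceiverClientReply reply = ratisClient.sendCommandAsync(putBlockRequest);
reply.getResponse().thenApply(v -> {
    watcher.updateCommitInfoMap(reply.getLogIndex(), singletonList(byteBuffer));
    return v;
});
// Later, on the flush path: block until the lowest outstanding log index is
// replicated, returning its buffers to the BufferPool.
watcher.watchOnFirstIndex();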

Example 5 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in the Apache Ozone project.

From the class TestContainerPersistence, method testPutBlockWithLotsOfChunks:

/**
 * Tests putting a block composed of multiple chunks and reading it back.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithLotsOfChunks() throws IOException, NoSuchAlgorithmException {
    final int chunkCount = 2;
    final int datalen = 1024;
    long totalSize = 0L;
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
    List<ChunkInfo> chunkList = new LinkedList<>();
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = new ChunkInfo(String.format("%d.data", blockID.getLocalID()), x * datalen, datalen);
        ChunkBuffer data = getData(datalen);
        setDataChecksum(info, data);
        chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
        totalSize += datalen;
        chunkList.add(info);
    }
    long bytesUsed = container.getContainerData().getBytesUsed();
    Assert.assertEquals(totalSize, bytesUsed);
    long writeBytes = container.getContainerData().getWriteBytes();
    Assert.assertEquals(chunkCount * datalen, writeBytes);
    long readCount = container.getContainerData().getReadCount();
    Assert.assertEquals(0, readCount);
    long writeCount = container.getContainerData().getWriteCount();
    Assert.assertEquals(chunkCount, writeCount);
    BlockData blockData = new BlockData(blockID);
    List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
    for (ChunkInfo i : chunkList) {
        chunkProtoList.add(i.getProtoBufMessage());
    }
    blockData.setChunks(chunkProtoList);
    blockManager.putBlock(container, blockData);
    BlockData readBlockData = blockManager.getBlock(container, blockData.getBlockID());
    ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
    ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData.getChunks().size() - 1));
    Assert.assertEquals(lastChunk.getChecksumData(), readChunk.getChecksumData());
}
Also used: KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Container (org.apache.hadoop.ozone.container.common.interfaces.Container), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), BlockID (org.apache.hadoop.hdds.client.BlockID), ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), LinkedList (java.util.LinkedList), Test (org.junit.Test)
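
To round the example off, a hedged sketch of reading every persisted chunk back through the ChunkManager; the readChunk signature is the one used in Example 2, and the loop body is an assumption built only from calls shown on this page:

// Sketch: re-read each chunk recorded in the block and sanity-check the
// recorded length against the 'datalen' written in the loop above.
for (ContainerProtos.ChunkInfo proto : readBlockData.getChunks()) {
    ChunkInfo chunk = ChunkInfo.getFromProtoBuf(proto);
    ChunkBuffer readBack =
        chunkManager.readChunk(container, blockID, chunk, getDispatcherContext());
    Assert.assertNotNull(readBack);
    Assert.assertEquals(datalen, chunk.getLen());
}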

Aggregations

Types used together with ChunkBuffer across the indexed examples, with occurrence counts:

ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 30
BlockID (org.apache.hadoop.hdds.client.BlockID): 14
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 14
Test (org.junit.Test): 13
IOException (java.io.IOException): 10
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 9
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 8
ArrayList (java.util.ArrayList): 6
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 5
CompletableFuture (java.util.concurrent.CompletableFuture): 4
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 4
XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply): 4
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 4
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 4
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 4
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 4
ByteBuffer (java.nio.ByteBuffer): 3
Path (java.nio.file.Path): 3
LinkedList (java.util.LinkedList): 3