Example 16 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

From the class TestFilePerBlockStrategy, method testMultipleWriteSingleRead.

/**
 * This test writes the data as many small chunks and then reads it all back
 * in a single large read.
 */
@Test
public void testMultipleWriteSingleRead() throws Exception {
    final int datalen = 1024;
    final int chunkCount = 1024;
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    ChunkManager subject = createTestSubject();
    for (int x = 0; x < chunkCount; x++) {
        // Each iteration writes to the same chunk file, but at a different offset.
        long offset = (long) x * datalen;
        ChunkInfo info = getChunk(blockID.getLocalID(), 0, offset, datalen);
        ChunkBuffer data = ContainerTestHelper.getData(datalen);
        oldSha.update(data.toByteString().asReadOnlyByteBuffer());
        data.rewind();
        setDataChecksum(info, data);
        subject.writeChunk(container, blockID, info, data, getDispatcherContext());
    }
    // Request to read the whole data in a single go.
    ChunkInfo largeChunk = getChunk(blockID.getLocalID(), 0, 0, datalen * chunkCount);
    ChunkBuffer chunk = subject.readChunk(container, blockID, largeChunk, getDispatcherContext());
    ByteBuffer newdata = chunk.toByteString().asReadOnlyByteBuffer();
    MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
    newSha.update(newdata);
    assertEquals(Hex.encodeHexString(oldSha.digest()), Hex.encodeHexString(newSha.digest()));
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), BlockID(org.apache.hadoop.hdds.client.BlockID), ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer), MessageDigest(java.security.MessageDigest), ByteBuffer(java.nio.ByteBuffer), KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), ChunkManager(org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager), Test(org.junit.Test)
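
For readers new to the ChunkBuffer API, here is a minimal sketch (not taken from the Ozone sources) of the write/rewind/read round-trip the test relies on. It assumes ChunkBuffer.allocate(int) and put(ByteBuffer) behave like their java.nio counterparts; consult the interface in org.apache.hadoop.ozone.common for the authoritative contract.

// Hypothetical helper: fill a ChunkBuffer, rewind it, and read the contents back.
static ByteBuffer roundTrip(byte[] payload) {
    ChunkBuffer buffer = ChunkBuffer.allocate(payload.length); // assumed allocate(int) factory
    buffer.put(ByteBuffer.wrap(payload));
    buffer.rewind(); // reset the position before reading, as the test does before checksumming
    return buffer.toByteString().asReadOnlyByteBuffer();
}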

Example 17 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

From the class ContainerTestHelper, method getWriteSmallFileRequest.

/**
 * Returns a PutSmallFile request that can be sent to the container.
 *
 * @param pipeline - Pipeline
 * @param blockID - Block ID of the small file.
 * @param dataLen - Number of bytes in the data
 * @return ContainerCommandRequestProto
 */
public static ContainerCommandRequestProto getWriteSmallFileRequest(Pipeline pipeline, BlockID blockID, int dataLen) throws Exception {
    ContainerProtos.PutSmallFileRequestProto.Builder smallFileRequest = ContainerProtos.PutSmallFileRequestProto.newBuilder();
    ChunkBuffer data = getData(dataLen);
    ChunkInfo info = getChunk(blockID.getLocalID(), 0, 0, dataLen);
    setDataChecksum(info, data);
    ContainerProtos.PutBlockRequestProto.Builder putRequest = ContainerProtos.PutBlockRequestProto.newBuilder();
    BlockData blockData = new BlockData(blockID);
    List<ContainerProtos.ChunkInfo> newList = new LinkedList<>();
    newList.add(info.getProtoBufMessage());
    blockData.setChunks(newList);
    putRequest.setBlockData(blockData.getProtoBufMessage());
    smallFileRequest.setChunkInfo(info.getProtoBufMessage());
    smallFileRequest.setData(data.toByteString());
    smallFileRequest.setBlock(putRequest);
    Builder request = ContainerCommandRequestProto.newBuilder();
    request.setCmdType(ContainerProtos.Type.PutSmallFile);
    request.setContainerID(blockID.getContainerID());
    request.setPutSmallFile(smallFileRequest);
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    return request.build();
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), Builder(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder), ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer), BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData), LinkedList(java.util.LinkedList)
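
A hedged usage sketch: given an already-connected XceiverClientSpi (here called client, an assumption not shown in the helper), the request built above could be dispatched and its result checked roughly as follows.

// Illustrative only: send the PutSmallFile request built by the helper above.
ContainerCommandRequestProto request = ContainerTestHelper.getWriteSmallFileRequest(pipeline, blockID, 1024);
ContainerCommandResponseProto response = client.sendCommand(request); // may throw IOException
if (response.getResult() != ContainerProtos.Result.SUCCESS) {
    throw new IOException("PutSmallFile failed: " + response.getMessage());
}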

Example 18 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

From the class TestBufferPool, method releaseAndReallocate.

@Test
public void releaseAndReallocate() {
    BufferPool pool = new BufferPool(1024, 8);
    ChunkBuffer cb1 = pool.allocateBuffer(0);
    ChunkBuffer cb2 = pool.allocateBuffer(0);
    ChunkBuffer cb3 = pool.allocateBuffer(0);
    pool.releaseBuffer(cb1);
    // After the release, the pool's buffer list is cb2, cb3, cb1, so the next allocation reuses cb1.
    final ChunkBuffer allocated = pool.allocateBuffer(0);
    Assert.assertEquals(3, pool.getSize());
    Assert.assertEquals(cb1, allocated);
}
Also used : ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer), Test(org.junit.Test)
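
The recycling behavior is easier to see in a client-style loop. This is a minimal sketch under the same assumptions as the test (a BufferPool(bufferSize, capacity) constructor and the allocateBuffer/releaseBuffer pair): a released buffer is handed back by a later allocation rather than growing the pool.

// Minimal sketch: a bounded pool of eight 1 KB buffers, reused across iterations.
BufferPool pool = new BufferPool(1024, 8);
for (int i = 0; i < 100; i++) {
    ChunkBuffer buffer = pool.allocateBuffer(0); // 0 = no increment, as in the test
    // ... fill the buffer and flush it to storage ...
    pool.releaseBuffer(buffer); // return it so the next iteration can reuse it
}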

Example 19 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

From the class KeyValueHandler, method handlePutSmallFile.

/**
 * Handle Put Small File operation. Writes the chunk and associated key
 * using a single RPC. Calls BlockManager and ChunkManager to process the
 * request.
 */
ContainerCommandResponseProto handlePutSmallFile(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) {
    if (!request.hasPutSmallFile()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Put Small File request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    PutSmallFileRequestProto putSmallFileReq = request.getPutSmallFile();
    final ContainerProtos.BlockData blockDataProto;
    try {
        checkContainerOpen(kvContainer);
        BlockData blockData = BlockData.getFromProtoBuf(putSmallFileReq.getBlock().getBlockData());
        Preconditions.checkNotNull(blockData);
        ContainerProtos.ChunkInfo chunkInfoProto = putSmallFileReq.getChunkInfo();
        ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
        Preconditions.checkNotNull(chunkInfo);
        ChunkBuffer data = ChunkBuffer.wrap(putSmallFileReq.getData().asReadOnlyByteBufferList());
        if (dispatcherContext == null) {
            dispatcherContext = new DispatcherContext.Builder().build();
        }
        BlockID blockID = blockData.getBlockID();
        // chunks will be committed as a part of handling putSmallFile
        // here. There is no need to maintain this info in openContainerBlockMap.
        chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
        validateChunkChecksumData(data, chunkInfo);
        chunkManager.finishWriteChunks(kvContainer, blockData);
        List<ContainerProtos.ChunkInfo> chunks = new LinkedList<>();
        chunks.add(chunkInfoProto);
        blockData.setChunks(chunks);
        blockData.setBlockCommitSequenceId(dispatcherContext.getLogIndex());
        blockManager.putBlock(kvContainer, blockData);
        blockDataProto = blockData.getProtoBufMessage();
        metrics.incContainerBytesStats(Type.PutSmallFile, chunkInfo.getLen());
    } catch (StorageContainerException ex) {
        return ContainerUtils.logAndReturnError(LOG, ex, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Put Small File failed", ex, PUT_SMALL_FILE_ERROR), request);
    }
    return getPutFileResponseSuccess(request, blockDataProto);
}
Also used : PutSmallFileRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutSmallFileRequestProto), ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), IOException(java.io.IOException), LinkedList(java.util.LinkedList), ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer), BlockID(org.apache.hadoop.hdds.client.BlockID), StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData)
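
The validateChunkChecksumData call verifies the incoming bytes against the checksums carried in the ChunkInfo. As a hedged sketch of the matching write-side step, assuming the org.apache.hadoop.ozone.common.Checksum API (the CRC32 type and 1024 bytes-per-checksum are illustrative choices, and payload/chunkInfo are hypothetical locals):

// Illustrative sketch: compute checksums over the payload so the datanode can verify it.
Checksum checksum = new Checksum(ContainerProtos.ChecksumType.CRC32, 1024);
ChecksumData checksumData = checksum.computeChecksum(payload); // payload: byte[]; may throw OzoneChecksumException
chunkInfo.setChecksumData(checksumData); // attach to the ChunkInfo sent with the request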

Example 20 with ChunkBuffer

Use of org.apache.hadoop.ozone.common.ChunkBuffer in project ozone by apache.

From the class KeyValueHandler, method handleWriteChunk.

/**
 * Handle Write Chunk operation. Calls ChunkManager to process the request.
 */
ContainerCommandResponseProto handleWriteChunk(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) {
    if (!request.hasWriteChunk()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Write Chunk request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    try {
        checkContainerOpen(kvContainer);
        WriteChunkRequestProto writeChunk = request.getWriteChunk();
        BlockID blockID = BlockID.getFromProtobuf(writeChunk.getBlockID());
        ContainerProtos.ChunkInfo chunkInfoProto = writeChunk.getChunkData();
        ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunkInfoProto);
        Preconditions.checkNotNull(chunkInfo);
        ChunkBuffer data = null;
        if (dispatcherContext == null) {
            dispatcherContext = new DispatcherContext.Builder().build();
        }
        WriteChunkStage stage = dispatcherContext.getStage();
        if (stage == WriteChunkStage.WRITE_DATA || stage == WriteChunkStage.COMBINED) {
            data = ChunkBuffer.wrap(writeChunk.getData().asReadOnlyByteBufferList());
            validateChunkChecksumData(data, chunkInfo);
        }
        chunkManager.writeChunk(kvContainer, blockID, chunkInfo, data, dispatcherContext);
        // We should increment stats after writeChunk
        if (stage == WriteChunkStage.WRITE_DATA || stage == WriteChunkStage.COMBINED) {
            metrics.incContainerBytesStats(Type.WriteChunk, writeChunk.getChunkData().getLen());
        }
    } catch (StorageContainerException ex) {
        return ContainerUtils.logAndReturnError(LOG, ex, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Write Chunk failed", ex, IO_EXCEPTION), request);
    }
    return getSuccessResponse(request);
}
Also used : WriteChunkRequestProto(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto), ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), WriteChunkStage(org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage), BlockID(org.apache.hadoop.hdds.client.BlockID), ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer), StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), IOException(java.io.IOException)
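
Note that the payload is only unpacked and validated in the WRITE_DATA and COMBINED stages; in COMMIT_DATA the chunk manager is called with null data. A minimal sketch of how a Ratis-driven caller might build the per-stage contexts, assuming the DispatcherContext.Builder setters shown here (setStage, setTerm, setLogIndex) and caller-supplied term/logIndex values:

// Illustrative sketch: the two-phase write on the Ratis path. The payload is
// persisted during WRITE_DATA and committed, without payload, during COMMIT_DATA.
DispatcherContext writeStage = new DispatcherContext.Builder()
    .setStage(WriteChunkStage.WRITE_DATA)
    .setTerm(term)         // Ratis term of the log entry (assumed available from the caller)
    .setLogIndex(logIndex) // Ratis log index of the entry
    .build();
DispatcherContext commitStage = new DispatcherContext.Builder()
    .setStage(WriteChunkStage.COMMIT_DATA)
    .setTerm(term)
    .setLogIndex(logIndex)
    .build();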

Aggregations

ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 30 uses
BlockID (org.apache.hadoop.hdds.client.BlockID): 14 uses
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 14 uses
Test (org.junit.Test): 13 uses
IOException (java.io.IOException): 10 uses
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 9 uses
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 8 uses
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 8 uses
ArrayList (java.util.ArrayList): 6 uses
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 5 uses
CompletableFuture (java.util.concurrent.CompletableFuture): 4 uses
ContainerCommandResponseProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto): 4 uses
XceiverClientReply (org.apache.hadoop.hdds.scm.XceiverClientReply): 4 uses
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 4 uses
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 4 uses
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 4 uses
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 4 uses
ByteBuffer (java.nio.ByteBuffer): 3 uses
Path (java.nio.file.Path): 3 uses
LinkedList (java.util.LinkedList): 3 uses