
Example 36 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class KeyValueHandler, method handleGetSmallFile.

/**
 * Handle Get Small File operation. Gets a data stream using a key. This
 * helps in reducing the RPC overhead for small files. Calls BlockManager and
 * ChunkManager to process the request.
 */
ContainerCommandResponseProto handleGetSmallFile(ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
    if (!request.hasGetSmallFile()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Get Small File request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    GetSmallFileRequestProto getSmallFileReq = request.getGetSmallFile();
    try {
        BlockID blockID = BlockID.getFromProtobuf(getSmallFileReq.getBlock().getBlockID());
        checkContainerIsHealthy(kvContainer, blockID, Type.GetSmallFile);
        BlockData responseData = blockManager.getBlock(kvContainer, blockID);
        ContainerProtos.ChunkInfo chunkInfoProto = null;
        List<ByteString> dataBuffers = new ArrayList<>();
        DispatcherContext dispatcherContext = new DispatcherContext.Builder().build();
        // The read-chunk version is a property of the request, not of each
        // chunk, so compute it once before iterating.
        boolean isReadChunkV0 = getReadChunkVersion(getSmallFileReq).equals(ContainerProtos.ReadChunkVersion.V0);
        for (ContainerProtos.ChunkInfo chunk : responseData.getChunks()) {
            // If the block is committed, all chunks must have been committed.
            // Tmp chunk files won't exist here.
            ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk);
            if (isReadChunkV0) {
                // For older clients, set ReadDataIntoSingleBuffer to true so that
                // all the data read from the chunk file is returned as a single
                // ByteString. Older clients cannot process data returned as a list
                // of ByteStrings.
                chunkInfo.setReadDataIntoSingleBuffer(true);
            }
            ChunkBuffer data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
            dataBuffers.addAll(data.toByteStringList(byteBufferToByteString));
            chunkInfoProto = chunk;
        }
        metrics.incContainerBytesStats(Type.GetSmallFile, BufferUtils.getBuffersLen(dataBuffers));
        return getGetSmallFileResponseSuccess(request, dataBuffers, chunkInfoProto);
    } catch (StorageContainerException e) {
        return ContainerUtils.logAndReturnError(LOG, e, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Get Small File failed", ex, GET_SMALL_FILE_ERROR), request);
    }
}
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), GetSmallFileRequestProto (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.GetSmallFileRequestProto), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext), ByteString (org.apache.ratis.thirdparty.com.google.protobuf.ByteString), ArrayList (java.util.ArrayList), IOException (java.io.IOException), BlockID (org.apache.hadoop.hdds.client.BlockID), ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData)
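
The isReadChunkV0 branch above exists because older clients expect the small-file payload as one contiguous ByteString rather than a list of buffers. A minimal sketch of why coalescing is cheap: protobuf ByteStrings concatenate into a rope without copying the underlying bytes. The coalesce helper below is hypothetical, not part of Ozone:

import java.util.List;
import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;

final class ByteStringCoalescer {
    private ByteStringCoalescer() {
    }

    // Concatenate a list of ByteStrings into one. ByteString.concat builds a
    // rope, so the chunk bytes themselves are not copied.
    static ByteString coalesce(List<ByteString> buffers) {
        ByteString result = ByteString.EMPTY;
        for (ByteString buffer : buffers) {
            result = result.concat(buffer);
        }
        return result;
    }
}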

Example 37 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class FilePerChunkStrategy, method readChunk.

/**
 * Reads the data defined by a chunk.
 *
 * @param container - Container for the chunk
 * @param blockID - ID of the block.
 * @param info - ChunkInfo.
 * @param dispatcherContext - dispatcher context info.
 * @return a ChunkBuffer wrapping the chunk data
 * @throws StorageContainerException if the chunk cannot be found or read
 * TODO: Right now we do not support partial reads and writes of chunks.
 * TODO: Explore if we need to do that for ozone.
 */
@Override
public ChunkBuffer readChunk(Container container, BlockID blockID, ChunkInfo info, DispatcherContext dispatcherContext) throws StorageContainerException {
    checkLayoutVersion(container);
    limitReadSize(info.getLen());
    KeyValueContainer kvContainer = (KeyValueContainer) container;
    KeyValueContainerData containerData = kvContainer.getContainerData();
    HddsVolume volume = containerData.getVolume();
    // In version 1, we verify the checksum if it is available and return the
    // data of the chunk file.
    File finalChunkFile = getChunkFile(kvContainer, blockID, info);
    List<File> possibleFiles = new ArrayList<>();
    possibleFiles.add(finalChunkFile);
    if (dispatcherContext != null && dispatcherContext.isReadFromTmpFile()) {
        possibleFiles.add(getTmpChunkFile(finalChunkFile, dispatcherContext));
        // HDDS-2372. Read finalChunkFile after tmpChunkFile to solve race
        // condition between read and commit.
        possibleFiles.add(finalChunkFile);
    }
    long len = info.getLen();
    long bufferCapacity = ChunkManager.getBufferCapacityForChunkRead(info, defaultReadBufferCapacity);
    ByteBuffer[] dataBuffers = BufferUtils.assignByteBuffers(len, bufferCapacity);
    long chunkFileOffset = 0;
    if (info.getOffset() != 0) {
        try {
            BlockData blockData = blockManager.getBlock(kvContainer, blockID);
            List<ContainerProtos.ChunkInfo> chunks = blockData.getChunks();
            String chunkName = info.getChunkName();
            boolean found = false;
            for (ContainerProtos.ChunkInfo chunk : chunks) {
                if (chunk.getChunkName().equals(chunkName)) {
                    chunkFileOffset = chunk.getOffset();
                    found = true;
                    break;
                }
            }
            if (!found) {
                throw new StorageContainerException("Cannot find chunk " + chunkName + " in block " + blockID.toString(), UNABLE_TO_FIND_CHUNK);
            }
        } catch (IOException e) {
            throw new StorageContainerException("Cannot find block " + blockID.toString() + " for chunk " + info.getChunkName(), UNABLE_TO_FIND_CHUNK);
        }
    }
    for (File file : possibleFiles) {
        try {
            if (file.exists()) {
                long offset = info.getOffset() - chunkFileOffset;
                Preconditions.checkState(offset >= 0);
                ChunkUtils.readData(file, dataBuffers, offset, len, volume);
                return ChunkBuffer.wrap(Lists.newArrayList(dataBuffers));
            }
        } catch (StorageContainerException ex) {
            // next possible location
            if (ex.getResult() != UNABLE_TO_FIND_CHUNK) {
                throw ex;
            }
            BufferUtils.clearBuffers(dataBuffers);
        }
    }
    throw new StorageContainerException("Chunk file can't be found " + possibleFiles.toString(), UNABLE_TO_FIND_CHUNK);
}
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ArrayList (java.util.ArrayList), IOException (java.io.IOException), ByteBuffer (java.nio.ByteBuffer), KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData), HddsVolume (org.apache.hadoop.ozone.container.common.volume.HddsVolume), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), File (java.io.File), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
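
The possibleFiles list above encodes the HDDS-2372 ordering: final file, then tmp file, then final file again, so a commit that races with the read is still observed at one of the locations. The standalone sketch below shows the same try-each-candidate pattern in plain java.nio; it is an illustration under stated assumptions, not Ozone's implementation, and omits the buffer splitting and volume accounting that ChunkUtils.readData performs:

import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.StandardOpenOption;
import java.util.List;

final class FallbackReader {
    private FallbackReader() {
    }

    // Try each candidate location in order. A file that disappears between the
    // exists() check and the open (e.g. a tmp file that was just committed)
    // simply falls through to the next candidate.
    static ByteBuffer readFirstAvailable(List<File> candidates, int length) throws IOException {
        for (File file : candidates) {
            if (!file.exists()) {
                continue;
            }
            try (FileChannel channel = FileChannel.open(file.toPath(), StandardOpenOption.READ)) {
                ByteBuffer buffer = ByteBuffer.allocate(length);
                while (buffer.hasRemaining() && channel.read(buffer) >= 0) {
                    // keep reading until the buffer is full or EOF
                }
                buffer.flip();
                return buffer;
            } catch (IOException e) {
                // next possible location
            }
        }
        throw new IOException("No candidate file found in " + candidates);
    }
}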

Example 38 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class FilePerChunkStrategy, method deleteChunks.

@Override
public void deleteChunks(Container container, BlockData blockData) throws StorageContainerException {
    for (ContainerProtos.ChunkInfo chunk : blockData.getChunks()) {
        try {
            ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(chunk);
            deleteChunk(container, blockData.getBlockID(), chunkInfo);
        } catch (IOException e) {
            throw new StorageContainerException(e, ContainerProtos.Result.INVALID_ARGUMENT);
        }
    }
}
Also used: ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos), ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), IOException (java.io.IOException), StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException)
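
ChunkInfo.getFromProtoBuf can throw a checked IOException, which deleteChunks surfaces as INVALID_ARGUMENT. Factored out, that conversion pattern could look like the helper below; the helper is hypothetical, not an Ozone API:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

final class ChunkProtoConverter {
    private ChunkProtoConverter() {
    }

    // Convert a protobuf ChunkInfo into the helper type, mapping the checked
    // IOException to a StorageContainerException, mirroring deleteChunks above.
    static ChunkInfo fromProto(ContainerProtos.ChunkInfo proto) throws StorageContainerException {
        try {
            return ChunkInfo.getFromProtoBuf(proto);
        } catch (IOException e) {
            throw new StorageContainerException(e, ContainerProtos.Result.INVALID_ARGUMENT);
        }
    }
}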

Example 39 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class TestFilePerChunkStrategy, method testWriteChunkStageWriteAndCommit.

@Test
public void testWriteChunkStageWriteAndCommit() throws Exception {
    ChunkManager chunkManager = createTestSubject();
    checkChunkFileCount(0);
    // As no chunks are written to the volume yet, writeBytes should be 0.
    checkWriteIOStats(0, 0);
    KeyValueContainer container = getKeyValueContainer();
    BlockID blockID = getBlockID();
    ChunkInfo chunkInfo = getChunkInfo();
    chunkManager.writeChunk(container, blockID, chunkInfo, getData(), new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.WRITE_DATA).build());
    // Now a chunk file is being written with Stage WRITE_DATA, so it should
    // create a temporary chunk file.
    checkChunkFileCount(1);
    long term = 0;
    long index = 0;
    File chunkFile = ContainerLayoutVersion.FILE_PER_CHUNK.getChunkFile(container.getContainerData(), blockID, chunkInfo);
    File tempChunkFile = new File(chunkFile.getParent(),
        chunkFile.getName() + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER
            + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX
            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + term
            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + index);
    // As chunk write stage is WRITE_DATA, temp chunk file will be created.
    assertTrue(tempChunkFile.exists());
    checkWriteIOStats(chunkInfo.getLen(), 1);
    chunkManager.writeChunk(container, blockID, chunkInfo, getData(), new DispatcherContext.Builder().setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA).build());
    checkWriteIOStats(chunkInfo.getLen(), 1);
    // Old temp file should have been renamed to chunk file.
    checkChunkFileCount(1);
    // As commit happened, chunk file should exist.
    assertTrue(chunkFile.exists());
    assertFalse(tempChunkFile.exists());
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext), BlockID (org.apache.hadoop.hdds.client.BlockID), File (java.io.File), ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager), KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer), Test (org.junit.Test)
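
The long tempChunkFile expression in the test reconstructs the name that FilePerChunkStrategy gives to in-flight chunk data during the WRITE_DATA stage. Below is a hedged sketch of that assembly as a helper; it assumes the OzoneConsts delimiter and temporary prefix keep their current values, and the helper itself is hypothetical:

import java.io.File;
import org.apache.hadoop.ozone.OzoneConsts;

final class TempChunkFiles {
    private TempChunkFiles() {
    }

    // Builds "<chunkFileName><delim><tmpPrefix><delim><term><delim><index>"
    // next to the committed chunk file, the same shape the test asserts on.
    static File tempChunkFile(File chunkFile, long term, long index) {
        String name = chunkFile.getName()
            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER
            + OzoneConsts.CONTAINER_TEMPORARY_CHUNK_PREFIX
            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + term
            + OzoneConsts.CONTAINER_CHUNK_NAME_DELIMITER + index;
        return new File(chunkFile.getParent(), name);
    }
}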

Example 40 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

The class TestContainerReader, method addBlocks.

private List<Long> addBlocks(KeyValueContainer keyValueContainer, boolean setMetaData) throws Exception {
    long containerId = keyValueContainer.getContainerData().getContainerID();
    List<Long> blkNames = new ArrayList<>();
    try (ReferenceCountedDB metadataStore = BlockUtils.getDB(keyValueContainer.getContainerData(), conf)) {
        for (int i = 0; i < blockCount; i++) {
            // Creating BlockData
            BlockID blockID = new BlockID(containerId, i);
            BlockData blockData = new BlockData(blockID);
            blockData.addMetadata(OzoneConsts.VOLUME, OzoneConsts.OZONE);
            blockData.addMetadata(OzoneConsts.OWNER, OzoneConsts.OZONE_SIMPLE_HDFS_USER);
            List<ContainerProtos.ChunkInfo> chunkList = new ArrayList<>();
            long localBlockID = blockID.getLocalID();
            ChunkInfo info = new ChunkInfo(String.format("%d.data.%d", localBlockID, 0), 0, blockLen);
            chunkList.add(info.getProtoBufMessage());
            blockData.setChunks(chunkList);
            blkNames.add(localBlockID);
            metadataStore.getStore().getBlockDataTable().put(Long.toString(localBlockID), blockData);
        }
        if (setMetaData) {
            metadataStore.getStore().getMetadataTable().put(OzoneConsts.BLOCK_COUNT, (long) blockCount);
            metadataStore.getStore().getMetadataTable().put(OzoneConsts.CONTAINER_BYTES_USED, blockCount * blockLen);
        }
    }
    return blkNames;
}
Also used: ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo), ArgumentMatchers.anyLong (org.mockito.ArgumentMatchers.anyLong), ArrayList (java.util.ArrayList), BlockID (org.apache.hadoop.hdds.client.BlockID), BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData), ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
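
addBlocks stores each chunk in its protobuf form via getProtoBufMessage. A minimal round trip of that serialization, with illustrative values, looks like this:

import java.io.IOException;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;

final class ChunkInfoRoundTrip {
    public static void main(String[] args) throws IOException {
        // Same ChunkInfo(chunkName, offset, len) constructor used in addBlocks.
        ChunkInfo original = new ChunkInfo("1.data.0", 0, 1024);
        // Serialize to the protobuf message stored in the block data table...
        ContainerProtos.ChunkInfo proto = original.getProtoBufMessage();
        // ...and parse it back into the helper type.
        ChunkInfo parsed = ChunkInfo.getFromProtoBuf(proto);
        System.out.println(parsed.getChunkName() + " len=" + parsed.getLen());
    }
}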

Aggregations

ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 40
BlockID (org.apache.hadoop.hdds.client.BlockID): 28
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 19
Test (org.junit.Test): 19
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 18
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 14
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 11
ArrayList (java.util.ArrayList): 10
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 10
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 10
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 8
File (java.io.File): 7
IOException (java.io.IOException): 7
LinkedList (java.util.LinkedList): 6
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 6
ByteBuffer (java.nio.ByteBuffer): 5
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 5
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 4
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy): 3