
Example 1 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

From the class ChunkKeyHandler, method execute.

@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
    containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
    xceiverClientManager = containerOperationClient.getXceiverClientManager();
    ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
    address.ensureKeyAddress();
    JsonElement element;
    JsonObject result = new JsonObject();
    String volumeName = address.getVolumeName();
    String bucketName = address.getBucketName();
    String keyName = address.getKeyName();
    List<ContainerProtos.ChunkInfo> tempchunks = null;
    List<ChunkDetails> chunkDetailsList = new ArrayList<>();
    HashSet<String> chunkPaths = new HashSet<>();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    // Querying the keyLocations: the OM is queried to get the containerID
    // and localID pertaining to the given key.
    List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    // for zero-sized key
    if (locationInfos.isEmpty()) {
        System.out.println("No Key Locations Found");
        return;
    }
    ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
    JsonArray responseArrayList = new JsonArray();
    for (OmKeyLocationInfo keyLocation : locationInfos) {
        ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
        ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
        long containerId = keyLocation.getContainerID();
        chunkPaths.clear();
        Pipeline pipeline = keyLocation.getPipeline();
        if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
            pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
        }
        xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
        // The datanode is queried to get chunk information. Querying the
        // OM, SCM and datanode together yields the chunk location information.
        ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
        // Do a getBlock on all the nodes in the pipeline.
        HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses = null;
        try {
            responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
        } catch (InterruptedException e) {
            LOG.error("Execution interrupted due to " + e);
            Thread.currentThread().interrupt();
        }
        if (responses == null) {
            // getBlockFromAllNodes was interrupted; release the client and skip
            // this key location instead of dereferencing a null map below.
            xceiverClientManager.releaseClientForReadData(xceiverClient, false);
            continue;
        }
        JsonArray responseFromAllNodes = new JsonArray();
        for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
            JsonObject jsonObj = new JsonObject();
            if (entry.getValue() == null) {
                LOG.error("Cant execute getBlock on this node");
                continue;
            }
            tempchunks = entry.getValue().getBlockData().getChunksList();
            ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
            for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
                String fileName = containerLayoutVersion.getChunkFile(new File(getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
                chunkPaths.add(fileName);
                ChunkDetails chunkDetails = new ChunkDetails();
                chunkDetails.setChunkName(fileName);
                chunkDetails.setChunkOffset(chunkInfo.getOffset());
                chunkDetailsList.add(chunkDetails);
            }
            containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
            containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
            containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
            containerChunkInfo.setFiles(chunkPaths);
            containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
            Gson gson = new GsonBuilder().create();
            if (isVerbose()) {
                element = gson.toJsonTree(containerChunkInfoVerbose);
            } else {
                element = gson.toJsonTree(containerChunkInfo);
            }
            jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
            jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
            jsonObj.addProperty("Container-ID", containerId);
            jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
            jsonObj.add("Locations", element);
            responseFromAllNodes.add(jsonObj);
        }
        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
        responseArrayList.add(responseFromAllNodes);
    }
    result.add("KeyLocations", responseArrayList);
    Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
    String prettyJson = gson2.toJson(result);
    System.out.println(prettyJson);
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ContainerLayoutVersion(org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion) ArrayList(java.util.ArrayList) JsonObject(com.google.gson.JsonObject) Gson(com.google.gson.Gson) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) HashSet(java.util.HashSet) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) GsonBuilder(com.google.gson.GsonBuilder) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ContainerOperationClient(org.apache.hadoop.hdds.scm.cli.ContainerOperationClient) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) JsonArray(com.google.gson.JsonArray) JsonElement(com.google.gson.JsonElement) HashMap(java.util.HashMap) Map(java.util.Map) File(java.io.File)
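
For reference, a minimal sketch of the pretty-printed output this command produces: the top-level "KeyLocations" array holds one inner array per key location, with one object per datanode. The field names under "Locations" are inferred from the setters above (setFiles, setPipelineID), and all values here are made up:

{
  "KeyLocations": [
    [
      {
        "Datanode-HostName": "datanode-0.example.com",
        "Datanode-IP": "10.0.0.1",
        "Container-ID": 1,
        "Block-ID": 105553116266496,
        "Locations": {
          "files": [
            "/data/hdds/.../chunks/105553116266496.block"
          ],
          "pipelineID": "f8a6b2c4-..."
        }
      }
    ]
  ]
}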

Example 2 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

From the class ContainerTestHelper, method newWriteChunkRequestBuilder.

public static Builder newWriteChunkRequestBuilder(Pipeline pipeline, BlockID blockID, int datalen, int seq) throws IOException {
    LOG.trace("writeChunk {} (blockID={}) to pipeline={}", datalen, blockID, pipeline);
    ContainerProtos.WriteChunkRequestProto.Builder writeRequest = ContainerProtos.WriteChunkRequestProto.newBuilder();
    writeRequest.setBlockID(blockID.getDatanodeBlockIDProtobuf());
    ChunkBuffer data = getData(datalen);
    ChunkInfo info = getChunk(blockID.getLocalID(), seq, 0, datalen);
    setDataChecksum(info, data);
    writeRequest.setChunkData(info.getProtoBufMessage());
    writeRequest.setData(data.toByteString());
    Builder request = ContainerCommandRequestProto.newBuilder();
    request.setCmdType(ContainerProtos.Type.WriteChunk);
    request.setContainerID(blockID.getContainerID());
    request.setWriteChunk(writeRequest);
    request.setDatanodeUuid(pipeline.getFirstNode().getUuidString());
    return request;
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) Builder(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto.Builder) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer)
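
A minimal usage sketch for this builder, assuming the test context of the surrounding class (pipeline and blockID would come from fixtures such as ContainerTestHelper.getTestBlockID in Example 5; the 1024 data length and sequence 0 are illustrative):

// Build a complete WriteChunk command for the first node of the pipeline.
ContainerCommandRequestProto writeChunk =
    ContainerTestHelper.newWriteChunkRequestBuilder(pipeline, blockID, 1024, 0)
        .build();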

Example 3 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

From the class KeyValueHandler, method handleReadChunk.

/**
 * Handle Read Chunk operation. Calls ChunkManager to process the request.
 */
ContainerCommandResponseProto handleReadChunk(ContainerCommandRequestProto request, KeyValueContainer kvContainer, DispatcherContext dispatcherContext) {
    if (!request.hasReadChunk()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Read Chunk request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    ChunkBuffer data;
    try {
        BlockID blockID = BlockID.getFromProtobuf(request.getReadChunk().getBlockID());
        ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(request.getReadChunk().getChunkData());
        Preconditions.checkNotNull(chunkInfo);
        checkContainerIsHealthy(kvContainer, blockID, Type.ReadChunk);
        BlockUtils.verifyBCSId(kvContainer, blockID);
        if (dispatcherContext == null) {
            dispatcherContext = new DispatcherContext.Builder().build();
        }
        boolean isReadChunkV0 = getReadChunkVersion(request.getReadChunk()).equals(ContainerProtos.ReadChunkVersion.V0);
        if (isReadChunkV0) {
            // For older clients, set ReadDataIntoSingleBuffer to true so that
            // all the data read from chunk file is returned as a single
            // ByteString. Older clients cannot process data returned as a list
            // of ByteStrings.
            chunkInfo.setReadDataIntoSingleBuffer(true);
        }
        data = chunkManager.readChunk(kvContainer, blockID, chunkInfo, dispatcherContext);
        // For client reads, the client is expected to validate.
        if (dispatcherContext.isReadFromTmpFile()) {
            validateChunkChecksumData(data, chunkInfo);
        }
        metrics.incContainerBytesStats(Type.ReadChunk, chunkInfo.getLen());
    } catch (StorageContainerException ex) {
        return ContainerUtils.logAndReturnError(LOG, ex, request);
    } catch (IOException ex) {
        return ContainerUtils.logAndReturnError(LOG, new StorageContainerException("Read Chunk failed", ex, IO_EXCEPTION), request);
    }
    Preconditions.checkNotNull(data, "Chunk data is null");
    return getReadChunkResponse(request, data, byteBufferToByteString);
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) BlockID(org.apache.hadoop.hdds.client.BlockID) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) IOException(java.io.IOException)
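
For context, a hypothetical sketch of the ReadChunk request this handler consumes. The builder mirrors the WriteChunk example above; the readChunkVersion field and the V1 value are assumptions based on the getReadChunkVersion call in the handler:

// blockID and info as in the other examples; V1 requests the
// list-of-ByteStrings response format, V0 the single-buffer one.
ContainerProtos.ReadChunkRequestProto.Builder readRequest =
    ContainerProtos.ReadChunkRequestProto.newBuilder()
        .setBlockID(blockID.getDatanodeBlockIDProtobuf())
        .setChunkData(info.getProtoBufMessage())
        .setReadChunkVersion(ContainerProtos.ReadChunkVersion.V1);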

Example 4 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

From the class KeyValueHandler, method deleteBlock.

/**
 * Called by BlockDeletingService to delete all the chunks in a block
 * before proceeding to delete the block info from DB.
 */
@Override
public void deleteBlock(Container container, BlockData blockData) throws IOException {
    chunkManager.deleteChunks(container, blockData);
    if (LOG.isDebugEnabled()) {
        for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
            ChunkInfo info = ChunkInfo.getFromProtoBuf(chunkInfo);
            LOG.debug("block {} chunk {} deleted", blockData.getBlockID(), info);
        }
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo)
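
Illustrative only: a caller-side sketch of this hook, building a BlockData with a single chunk so the debug loop above has something to log. The handler, container, blockID and info names are assumed from the other examples, and Collections is java.util.Collections:

// Attach one chunk's proto to the block, then delete chunks and log.
BlockData blockData = new BlockData(blockID);
blockData.setChunks(Collections.singletonList(info.getProtoBufMessage()));
handler.deleteBlock(container, blockData);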

Example 5 with ChunkInfo

Use of org.apache.hadoop.ozone.container.common.helpers.ChunkInfo in project ozone by apache.

From the class TestContainerPersistence, method testPutBlockWithLotsOfChunks.

/**
 * Tests putting a block that references multiple chunks and reading it back.
 *
 * @throws IOException
 * @throws NoSuchAlgorithmException
 */
@Test
public void testPutBlockWithLotsOfChunks() throws IOException, NoSuchAlgorithmException {
    final int chunkCount = 2;
    final int datalen = 1024;
    long totalSize = 0L;
    long testContainerID = getTestContainerID();
    Container container = addContainer(containerSet, testContainerID);
    BlockID blockID = ContainerTestHelper.getTestBlockID(testContainerID);
    List<ChunkInfo> chunkList = new LinkedList<>();
    for (int x = 0; x < chunkCount; x++) {
        ChunkInfo info = new ChunkInfo(String.format("%d.data", blockID.getLocalID()), x * datalen, datalen);
        ChunkBuffer data = getData(datalen);
        setDataChecksum(info, data);
        chunkManager.writeChunk(container, blockID, info, data, getDispatcherContext());
        totalSize += datalen;
        chunkList.add(info);
    }
    long bytesUsed = container.getContainerData().getBytesUsed();
    Assert.assertEquals(totalSize, bytesUsed);
    long writeBytes = container.getContainerData().getWriteBytes();
    Assert.assertEquals(chunkCount * datalen, writeBytes);
    long readCount = container.getContainerData().getReadCount();
    Assert.assertEquals(0, readCount);
    long writeCount = container.getContainerData().getWriteCount();
    Assert.assertEquals(chunkCount, writeCount);
    BlockData blockData = new BlockData(blockID);
    List<ContainerProtos.ChunkInfo> chunkProtoList = new LinkedList<>();
    for (ChunkInfo i : chunkList) {
        chunkProtoList.add(i.getProtoBufMessage());
    }
    blockData.setChunks(chunkProtoList);
    blockManager.putBlock(container, blockData);
    BlockData readBlockData = blockManager.getBlock(container, blockData.getBlockID());
    ChunkInfo lastChunk = chunkList.get(chunkList.size() - 1);
    ChunkInfo readChunk = ChunkInfo.getFromProtoBuf(readBlockData.getChunks().get(readBlockData.getChunks().size() - 1));
    Assert.assertEquals(lastChunk.getChecksumData(), readChunk.getChecksumData());
}
Also used : KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer) Container(org.apache.hadoop.ozone.container.common.interfaces.Container) ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) BlockID(org.apache.hadoop.hdds.client.BlockID) ChunkBuffer(org.apache.hadoop.ozone.common.ChunkBuffer) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) LinkedList(java.util.LinkedList) Test(org.junit.Test)
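
As a hedged follow-up to this test, the last chunk could also be read back through the same ChunkManager and compared by size, using the readChunk signature shown in Example 3 (the assertion below is illustrative, not part of the original test):

// Read the last written chunk back and check it has the expected length.
ChunkBuffer readData = chunkManager.readChunk(container, blockID, lastChunk, getDispatcherContext());
Assert.assertEquals(datalen, readData.toByteString().size());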

Aggregations

ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 40 usages
BlockID (org.apache.hadoop.hdds.client.BlockID): 28 usages
KeyValueContainer (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer): 19 usages
Test (org.junit.Test): 19 usages
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 18 usages
ChunkBuffer (org.apache.hadoop.ozone.common.ChunkBuffer): 14 usages
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 11 usages
ArrayList (java.util.ArrayList): 10 usages
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 10 usages
ChunkManager (org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager): 10 usages
DispatcherContext (org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext): 8 usages
File (java.io.File): 7 usages
IOException (java.io.IOException): 7 usages
LinkedList (java.util.LinkedList): 6 usages
Container (org.apache.hadoop.ozone.container.common.interfaces.Container): 6 usages
ByteBuffer (java.nio.ByteBuffer): 5 usages
ReferenceCountedDB (org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB): 5 usages
KeyValueContainerData (org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData): 4 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 3 usages
RoundRobinVolumeChoosingPolicy (org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy): 3 usages