Search in sources:

Example 1 with ContainerLayoutVersion

Use of org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion in project ozone by Apache.

From the class ChunkKeyHandler, method execute:

@Override
protected void execute(OzoneClient client, OzoneAddress address) throws IOException, OzoneClientException {
    containerOperationClient = new ContainerOperationClient(createOzoneConfiguration());
    xceiverClientManager = containerOperationClient.getXceiverClientManager();
    ozoneManagerClient = client.getObjectStore().getClientProxy().getOzoneManagerClient();
    address.ensureKeyAddress();
    JsonElement element;
    JsonObject result = new JsonObject();
    String volumeName = address.getVolumeName();
    String bucketName = address.getBucketName();
    String keyName = address.getKeyName();
    List<ContainerProtos.ChunkInfo> tempchunks = null;
    List<ChunkDetails> chunkDetailsList = new ArrayList<>();
    HashSet<String> chunkPaths = new HashSet<>();
    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setVolumeName(volumeName).setBucketName(bucketName).setKeyName(keyName).setRefreshPipeline(true).build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    // Query the key locations. The OM is queried to get the containerID
    // and localID pertaining to the given key.
    List<OmKeyLocationInfo> locationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    // for zero-sized key
    if (locationInfos.isEmpty()) {
        System.out.println("No Key Locations Found");
        return;
    }
    ContainerLayoutVersion containerLayoutVersion = ContainerLayoutVersion.getConfiguredVersion(getConf());
    JsonArray responseArrayList = new JsonArray();
    for (OmKeyLocationInfo keyLocation : locationInfos) {
        ContainerChunkInfo containerChunkInfoVerbose = new ContainerChunkInfo();
        ContainerChunkInfo containerChunkInfo = new ContainerChunkInfo();
        long containerId = keyLocation.getContainerID();
        chunkPaths.clear();
        Pipeline pipeline = keyLocation.getPipeline();
        if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
            pipeline = Pipeline.newBuilder(pipeline).setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE)).build();
        }
        xceiverClient = xceiverClientManager.acquireClientForReadData(pipeline);
        // The datanode is queried to get chunk information. Querying the
        // OM, SCM and datanode together yields the chunk location information.
        ContainerProtos.DatanodeBlockID datanodeBlockID = keyLocation.getBlockID().getDatanodeBlockIDProtobuf();
        // Do a getBlock on all nodes.
        HashMap<DatanodeDetails, ContainerProtos.GetBlockResponseProto> responses;
        try {
            responses = ContainerProtocolCalls.getBlockFromAllNodes(xceiverClient, datanodeBlockID, keyLocation.getToken());
        } catch (InterruptedException e) {
            LOG.error("Execution interrupted.", e);
            Thread.currentThread().interrupt();
            // responses would be null past this point; bail out instead of
            // hitting a NullPointerException when iterating it below.
            return;
        }
        JsonArray responseFromAllNodes = new JsonArray();
        for (Map.Entry<DatanodeDetails, ContainerProtos.GetBlockResponseProto> entry : responses.entrySet()) {
            JsonObject jsonObj = new JsonObject();
            if (entry.getValue() == null) {
                LOG.error("Cant execute getBlock on this node");
                continue;
            }
            tempchunks = entry.getValue().getBlockData().getChunksList();
            ContainerProtos.ContainerDataProto containerData = containerOperationClient.readContainer(keyLocation.getContainerID(), keyLocation.getPipeline());
            for (ContainerProtos.ChunkInfo chunkInfo : tempchunks) {
                String fileName = containerLayoutVersion.getChunkFile(new File(getChunkLocationPath(containerData.getContainerPath())), keyLocation.getBlockID(), ChunkInfo.getFromProtoBuf(chunkInfo)).toString();
                chunkPaths.add(fileName);
                ChunkDetails chunkDetails = new ChunkDetails();
                chunkDetails.setChunkName(fileName);
                chunkDetails.setChunkOffset(chunkInfo.getOffset());
                chunkDetailsList.add(chunkDetails);
            }
            containerChunkInfoVerbose.setContainerPath(containerData.getContainerPath());
            containerChunkInfoVerbose.setPipeline(keyLocation.getPipeline());
            containerChunkInfoVerbose.setChunkInfos(chunkDetailsList);
            containerChunkInfo.setFiles(chunkPaths);
            containerChunkInfo.setPipelineID(keyLocation.getPipeline().getId().getId());
            Gson gson = new GsonBuilder().create();
            if (isVerbose()) {
                element = gson.toJsonTree(containerChunkInfoVerbose);
            } else {
                element = gson.toJsonTree(containerChunkInfo);
            }
            jsonObj.addProperty("Datanode-HostName", entry.getKey().getHostName());
            jsonObj.addProperty("Datanode-IP", entry.getKey().getIpAddress());
            jsonObj.addProperty("Container-ID", containerId);
            jsonObj.addProperty("Block-ID", keyLocation.getLocalID());
            jsonObj.add("Locations", element);
            responseFromAllNodes.add(jsonObj);
        }
        // Release once per acquire; releasing inside the per-datanode loop
        // would decrement the client's reference count multiple times.
        xceiverClientManager.releaseClientForReadData(xceiverClient, false);
        responseArrayList.add(responseFromAllNodes);
    }
    result.add("KeyLocations", responseArrayList);
    Gson gson2 = new GsonBuilder().setPrettyPrinting().create();
    String prettyJson = gson2.toJson(result);
    System.out.println(prettyJson);
}
Also used : ChunkInfo(org.apache.hadoop.ozone.container.common.helpers.ChunkInfo) ContainerLayoutVersion(org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion) ArrayList(java.util.ArrayList) JsonObject(com.google.gson.JsonObject) Gson(com.google.gson.Gson) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) OmKeyInfo(org.apache.hadoop.ozone.om.helpers.OmKeyInfo) HashSet(java.util.HashSet) ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) GsonBuilder(com.google.gson.GsonBuilder) OmKeyArgs(org.apache.hadoop.ozone.om.helpers.OmKeyArgs) ContainerOperationClient(org.apache.hadoop.hdds.scm.cli.ContainerOperationClient) OmKeyLocationInfo(org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) JsonArray(com.google.gson.JsonArray) JsonElement(com.google.gson.JsonElement) HashMap(java.util.HashMap) Map(java.util.Map) File(java.io.File)
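
All four examples on this page share the same two calls: ContainerLayoutVersion.getConfiguredVersion to resolve the cluster's configured layout, and getChunkFile to map a block's chunk to its on-disk path. Below is a minimal sketch of that pattern distilled from the snippet above; the class name, method name and the ConfigurationSource parameter type are illustrative assumptions, and only the two ContainerLayoutVersion calls are taken from the code shown here.

import java.io.File;

import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;

final class ChunkPathResolver {

    // Resolves the on-disk file for one chunk of one block, using the
    // layout version configured for this datanode (sketch; the method
    // and its parameters are illustrative, not part of the Ozone API).
    static File resolveChunkFile(ConfigurationSource conf, File chunkDir,
            BlockID blockID, ChunkInfo chunkInfo) {
        ContainerLayoutVersion layout =
            ContainerLayoutVersion.getConfiguredVersion(conf);
        return layout.getChunkFile(chunkDir, blockID, chunkInfo);
    }
}

The two layout constants, FILE_PER_CHUNK and FILE_PER_BLOCK, give each chunk its own file or append all chunks of a block into one file respectively, which is why the chunk offset is recorded separately in ChunkDetails above.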

Example 2 with ContainerLayoutVersion

Use of org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion in project ozone by Apache.

From the class KeyValueHandler, method handleCreateContainer:

/**
 * Handles Create Container Request. If successful, adds the container to
 * ContainerSet and sends an ICR to the SCM.
 */
ContainerCommandResponseProto handleCreateContainer(ContainerCommandRequestProto request, KeyValueContainer kvContainer) {
    if (!request.hasCreateContainer()) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Malformed Create Container request. trace ID: {}", request.getTraceID());
        }
        return malformedRequest(request);
    }
    // Create Container request should be passed a null container as the
    // container would be created here.
    Preconditions.checkArgument(kvContainer == null);
    long containerID = request.getContainerID();
    ContainerLayoutVersion layoutVersion = ContainerLayoutVersion.getConfiguredVersion(conf);
    KeyValueContainerData newContainerData = new KeyValueContainerData(containerID, layoutVersion, maxContainerSize, request.getPipelineID(), getDatanodeId());
    // TODO: Add support to add metadataList to ContainerData. Add metadata
    // to container during creation.
    KeyValueContainer newContainer = new KeyValueContainer(newContainerData, conf);
    boolean created = false;
    Lock containerIdLock = containerCreationLocks.get(containerID);
    containerIdLock.lock();
    try {
        if (containerSet.getContainer(containerID) == null) {
            newContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
            created = containerSet.addContainer(newContainer);
        } else {
            // A create request for an already existing container can arrive
            // when the ContainerStateMachine reapplies the transaction on
            // datanode restart, so it is only logged at debug level.
            LOG.debug("Container already exists. containerID: {}", containerID);
        }
    } catch (StorageContainerException ex) {
        return ContainerUtils.logAndReturnError(LOG, ex, request);
    } finally {
        containerIdLock.unlock();
    }
    if (created) {
        try {
            sendICR(newContainer);
        } catch (StorageContainerException ex) {
            return ContainerUtils.logAndReturnError(LOG, ex, request);
        }
    }
    return getSuccessResponse(request);
}
Also used : ContainerLayoutVersion(org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) Lock(java.util.concurrent.locks.Lock)
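
The call containerCreationLocks.get(containerID) above suggests a striped-lock scheme. The following is a generic sketch of the same check-lock-create idiom, assuming Guava's Striped locks; the field's initialization is not shown in the snippet, so the stripe count and the backing map below are illustrative.

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.locks.Lock;

import com.google.common.util.concurrent.Striped;

final class CreateOnceDemo {

    // Striped locks bound the number of lock objects while still giving
    // near-per-key mutual exclusion; containerCreationLocks above is
    // presumably built the same way (an assumption).
    private final Striped<Lock> locks = Striped.lazyWeakLock(1024);
    private final ConcurrentMap<Long, String> containers = new ConcurrentHashMap<>();

    // Returns true only for the first creation of a given id; a replayed
    // request (e.g. a state-machine reapply) falls through harmlessly.
    boolean createIfAbsent(long containerID) {
        Lock lock = locks.get(containerID);
        lock.lock();
        try {
            if (containers.containsKey(containerID)) {
                return false;
            }
            containers.put(containerID, "container-" + containerID);
            return true;
        } finally {
            lock.unlock();
        }
    }
}

Note that the "created" flag in handleCreateContainer serves the same purpose: the ICR is only sent for a genuinely new container, never for a replay.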

Example 3 with ContainerLayoutVersion

Use of org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion in project ozone by Apache.

From the class KeyValueContainerCheck, method scanData:

private void scanData(DataTransferThrottler throttler, Canceler canceler) throws IOException {
    /*
     * Check the integrity of the DB inside each container.
     * 1. iterate over each key (Block) and locate the chunks for the block
     * 2. garbage detection (TBD): chunks which exist in the filesystem,
     *    but not in the DB. This function will be implemented in HDDS-1202
     * 3. chunk checksum verification.
     */
    Preconditions.checkState(onDiskContainerData != null, "invoke loadContainerData prior to calling this function");
    File metaDir = new File(metadataPath);
    File dbFile = KeyValueContainerLocationUtil.getContainerDBFile(metaDir, containerID);
    if (!dbFile.exists() || !dbFile.canRead()) {
        String dbFileErrorMsg = "Unable to access DB File [" + dbFile.toString() + "] for Container [" + containerID + "] metadata path [" + metadataPath + "]";
        throw new IOException(dbFileErrorMsg);
    }
    onDiskContainerData.setDbFile(dbFile);
    ContainerLayoutVersion layout = onDiskContainerData.getLayoutVersion();
    try (ReferenceCountedDB db = BlockUtils.getDB(onDiskContainerData, checkConfig);
        BlockIterator<BlockData> kvIter = db.getStore().getBlockIterator()) {
        while (kvIter.hasNext()) {
            BlockData block = kvIter.nextBlock();
            for (ContainerProtos.ChunkInfo chunk : block.getChunks()) {
                File chunkFile = layout.getChunkFile(onDiskContainerData, block.getBlockID(), ChunkInfo.getFromProtoBuf(chunk));
                if (!chunkFile.exists()) {
                    // Concurrent mutation in the block DB? Look up the block again.
                    String localBlockID = Long.toString(block.getBlockID().getLocalID());
                    BlockData bdata = db.getStore().getBlockDataTable().get(localBlockID);
                    if (bdata != null) {
                        throw new IOException("Missing chunk file " + chunkFile.getAbsolutePath());
                    }
                } else if (chunk.getChecksumData().getType() != ContainerProtos.ChecksumType.NONE) {
                    verifyChecksum(block, chunk, chunkFile, layout, throttler, canceler);
                }
            }
        }
    }
}
Also used : ContainerProtos(org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos) ContainerLayoutVersion(org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion) ByteString(org.apache.ratis.thirdparty.com.google.protobuf.ByteString) IOException(java.io.IOException) BlockData(org.apache.hadoop.ozone.container.common.helpers.BlockData) File(java.io.File) ReferenceCountedDB(org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB)
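
The per-chunk verification is delegated to verifyChecksum, which is not part of this snippet. As a rough illustration of step 3 only, here is a self-contained JDK sketch that re-reads a file and compares a digest; Ozone's own Checksum class supports several checksum types per ChunkInfo, so this is an analogy rather than the actual implementation.

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;

final class ChecksumCheckDemo {

    // Streams the file through a digest and compares against the expected
    // bytes, analogous to re-reading a chunk file and comparing its
    // checksum with the one recorded in the block's ChunkInfo.
    static boolean matches(Path file, byte[] expected)
            throws IOException, NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        byte[] buf = new byte[8192];
        try (InputStream in = Files.newInputStream(file)) {
            int n;
            while ((n = in.read(buf)) != -1) {
                md.update(buf, 0, n);
            }
        }
        return Arrays.equals(md.digest(), expected);
    }
}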

Example 4 with ContainerLayoutVersion

Use of org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion in project ozone by Apache.

From the class GeneratorDatanode, method createContainer:

private KeyValueContainer createContainer(long containerId) throws IOException {
    ContainerLayoutVersion layoutVersion = ContainerLayoutVersion.getConfiguredVersion(config);
    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(containerId, layoutVersion, getContainerSize(config), getPrefix(), datanodeId);
    KeyValueContainer keyValueContainer = new KeyValueContainer(keyValueContainerData, config);
    try {
        keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
    } catch (StorageContainerException ex) {
        throw new RuntimeException(ex);
    }
    return keyValueContainer;
}
Also used : ContainerLayoutVersion(org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion) StorageContainerException(org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException) KeyValueContainerData(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData) KeyValueContainer(org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer)
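
getConfiguredVersion appears in Examples 1, 2 and 4, so the layout choice is driven purely by configuration. Below is a minimal sketch of pinning it explicitly; the key name ozone.scm.container.layout and the value FILE_PER_BLOCK are assumptions to verify against ScmConfigKeys and the ContainerLayoutVersion constants in your Ozone release.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;

final class LayoutConfigDemo {
    public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Assumed key and value; check ScmConfigKeys in your release.
        conf.set("ozone.scm.container.layout", "FILE_PER_BLOCK");
        ContainerLayoutVersion layout =
            ContainerLayoutVersion.getConfiguredVersion(conf);
        System.out.println("Configured layout: " + layout);
    }
}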

Aggregations

ContainerLayoutVersion (org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion): 4
File (java.io.File): 2
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 2
StorageContainerException (org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException): 2
Gson (com.google.gson.Gson): 1
GsonBuilder (com.google.gson.GsonBuilder): 1
JsonArray (com.google.gson.JsonArray): 1
JsonElement (com.google.gson.JsonElement): 1
JsonObject (com.google.gson.JsonObject): 1
IOException (java.io.IOException): 1
ArrayList (java.util.ArrayList): 1
HashMap (java.util.HashMap): 1
HashSet (java.util.HashSet): 1
Map (java.util.Map): 1
Lock (java.util.concurrent.locks.Lock): 1
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 1
ContainerOperationClient (org.apache.hadoop.hdds.scm.cli.ContainerOperationClient): 1
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 1
BlockData (org.apache.hadoop.ozone.container.common.helpers.BlockData): 1
ChunkInfo (org.apache.hadoop.ozone.container.common.helpers.ChunkInfo): 1