
Example 1 with Pipeline

Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project.

From the class StorageContainerLocationProtocolClientSideTranslatorPB, the method getPipeline:

@Override
public Pipeline getPipeline(HddsProtos.PipelineID pipelineID) throws IOException {
    GetPipelineRequestProto request = GetPipelineRequestProto.newBuilder()
        .setPipelineID(pipelineID)
        .setTraceID(TracingUtil.exportCurrentSpan())
        .build();
    GetPipelineResponseProto response = submitRequest(Type.GetPipeline,
        builder -> builder.setGetPipelineRequest(request))
        .getGetPipelineResponse();
    return Pipeline.getFromProtobuf(response.getPipeline());
}
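
For orientation, the translator above is one implementation of StorageContainerLocationProtocol, so a caller resolves pipelines through that interface. A minimal hedged sketch, assuming an already-built client proxy named scmClient (hypothetical) and the PipelineID helper class from the same package as Pipeline:

// Hedged sketch: resolving a pipeline through the protocol interface that the
// translator above implements. "scmClient" is an assumed, already-built proxy.
Pipeline resolve(StorageContainerLocationProtocol scmClient, PipelineID id)
        throws IOException {
    // PipelineID.getProtobuf() yields the HddsProtos.PipelineID form that
    // getPipeline(...) expects over the wire.
    return scmClient.getPipeline(id.getProtobuf());
}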

Example 2 with Pipeline

Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project.

From the class TestDatanodeUpgradeToScmHA, the method testFormattingNewVolumes:

@Test
public void testFormattingNewVolumes() throws Exception {
    /// SETUP ///
    String originalScmID = startScmServer();
    File preFinVolume1 = addVolume();
    startPreFinalizedDatanode();
    final Pipeline pipeline = getPipeline();
    /// PRE-FINALIZED: Write and Read from formatted volume ///
    Assert.assertEquals(1, dsm.getContainer().getVolumeSet().getVolumesList().size());
    Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
    // Add container with data, make sure it can be read and written.
    final long containerID = addContainer(pipeline);
    ContainerProtos.WriteChunkRequestProto writeChunk = putBlock(containerID, pipeline);
    readChunk(writeChunk, pipeline);
    checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
    /// PRE-FINALIZED: Restart with SCM HA enabled and new SCM ID ///
    // Now SCM and enough other DNs finalize to enable SCM HA. This DN is
    // restarted with SCM HA config and gets a different SCM ID.
    conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
    changeScmID();
    // A new volume is added that must be formatted.
    File preFinVolume2 = addVolume();
    restartDatanode(HDDSLayoutFeature.INITIAL_VERSION.layoutVersion());
    Assert.assertEquals(2, dsm.getContainer().getVolumeSet().getVolumesList().size());
    Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
    // Because DN MLV would be behind SCM MLV, only reads are allowed.
    readChunk(writeChunk, pipeline);
    // On restart, there should have been no changes to the paths already used.
    checkPreFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
    // No new containers can be created on this volume since SCM MLV is ahead
    // of DN MLV at this point.
    // cluster ID should always be used for the new volume since SCM HA is now
    // enabled.
    checkVolumePathID(preFinVolume2, CLUSTER_ID);
    /// FINALIZE ///
    closeContainer(containerID, pipeline);
    dsm.finalizeUpgrade();
    LambdaTestUtils.await(2000, 500, () -> dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA));
    /// FINALIZED: Add a new volume and check its formatting ///
    // Add a new volume that should be formatted with cluster ID only, since
    // DN has finalized.
    File finVolume = addVolume();
    // Yet another SCM ID is received this time, but it should not matter.
    changeScmID();
    restartDatanode(HDDSLayoutFeature.SCM_HA.layoutVersion());
    Assert.assertEquals(3, dsm.getContainer().getVolumeSet().getVolumesList().size());
    Assert.assertEquals(0, dsm.getContainer().getVolumeSet().getFailedVolumesList().size());
    checkFinalizedVolumePathID(preFinVolume1, originalScmID, CLUSTER_ID);
    checkVolumePathID(preFinVolume2, CLUSTER_ID);
    checkContainerPathID(containerID, originalScmID, CLUSTER_ID);
    // New volume should have been formatted with cluster ID only, since the
    // datanode is finalized.
    checkVolumePathID(finVolume, CLUSTER_ID);
    /// FINALIZED: Read old data and write + read new data ///
    // Read container from before upgrade. The upgrade required it to be closed.
    readChunk(writeChunk, pipeline);
    // Write and read container after upgrade.
    long newContainerID = addContainer(pipeline);
    ContainerProtos.WriteChunkRequestProto newWriteChunk = putBlock(newContainerID, pipeline);
    readChunk(newWriteChunk, pipeline);
    // The new container should use cluster ID in its path.
    // The volume it is placed on is up to the implementation.
    checkContainerPathID(newContainerID, CLUSTER_ID);
}
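
Two pieces of machinery the test leans on are the SCM HA configuration key and the datanode's metadata layout version (MLV) gate. A minimal hedged sketch of both, lifted from the test itself; dsm is the test's DatanodeStateMachine and HDDSLayoutFeature comes from the test's imports:

// Enable SCM HA before restarting the datanode, as the test does.
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);

// The MLV gate: SCM-HA-specific behavior is only allowed once the layout
// feature is finalized on this datanode.
boolean scmHaFinalized =
        dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA);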

Example 3 with Pipeline

Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project.

From the class TestDatanodeUpgradeToScmHA, the method testReadsDuringFinalization:

@Test
public void testReadsDuringFinalization() throws Exception {
    // start DN and SCM
    startScmServer();
    addVolume();
    startPreFinalizedDatanode();
    final Pipeline pipeline = getPipeline();
    // Add data to read.
    final long containerID = addContainer(pipeline);
    ContainerProtos.WriteChunkRequestProto writeChunk = putBlock(containerID, pipeline);
    closeContainer(containerID, pipeline);
    // Create thread to keep reading during finalization.
    ExecutorService executor = Executors.newFixedThreadPool(1);
    Future<Void> readFuture = executor.submit(() -> {
        // Layout version check should be thread safe.
        while (!dsm.getLayoutVersionManager().isAllowed(HDDSLayoutFeature.SCM_HA)) {
            readChunk(writeChunk, pipeline);
        }
        // Make sure we can read after finalizing too.
        readChunk(writeChunk, pipeline);
        return null;
    });
    dsm.finalizeUpgrade();
    // If there was a failure reading during the upgrade, the exception will
    // be thrown here.
    readFuture.get();
}
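
One hedged follow-up: the test never shuts down its single-thread pool, which is harmless in a short-lived test JVM, but longer-lived code should release it once the future completes (TimeUnit is java.util.concurrent.TimeUnit):

executor.shutdown();
// Give in-flight reads a bounded window to finish before forcing shutdown.
if (!executor.awaitTermination(10, TimeUnit.SECONDS)) {
    executor.shutdownNow();
}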

Example 4 with Pipeline

Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project.

From the class RpcClient, the method getKeysEveryReplicas:

@Override
public Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> getKeysEveryReplicas(
        String volumeName, String bucketName, String keyName) throws IOException {
    Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> result = new LinkedHashMap<>();
    verifyVolumeName(volumeName);
    verifyBucketName(bucketName);
    Preconditions.checkNotNull(keyName);
    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
        .setVolumeName(volumeName)
        .setBucketName(bucketName)
        .setKeyName(keyName)
        .setRefreshPipeline(true)
        .setSortDatanodesInPipeline(topologyAwareReadEnabled)
        .build();
    OmKeyInfo keyInfo = ozoneManagerClient.lookupKey(keyArgs);
    List<OmKeyLocationInfo> keyLocationInfos = keyInfo.getLatestVersionLocations().getBlocksLatestVersionOnly();
    for (OmKeyLocationInfo keyLocationInfo : keyLocationInfos) {
        Map<DatanodeDetails, OzoneInputStream> blocks = new HashMap<>();
        Pipeline pipelineBefore = keyLocationInfo.getPipeline();
        List<DatanodeDetails> datanodes = pipelineBefore.getNodes();
        for (DatanodeDetails dn : datanodes) {
            List<DatanodeDetails> nodes = new ArrayList<>();
            nodes.add(dn);
            Pipeline pipeline = new Pipeline.Builder(pipelineBefore)
                .setNodes(nodes)
                .setId(PipelineID.randomId())
                .build();
            keyLocationInfo.setPipeline(pipeline);
            List<OmKeyLocationInfo> keyLocationInfoList = new ArrayList<>();
            keyLocationInfoList.add(keyLocationInfo);
            OmKeyLocationInfoGroup keyLocationInfoGroup = new OmKeyLocationInfoGroup(0, keyLocationInfoList);
            List<OmKeyLocationInfoGroup> keyLocationInfoGroups = new ArrayList<>();
            keyLocationInfoGroups.add(keyLocationInfoGroup);
            keyInfo.setKeyLocationVersions(keyLocationInfoGroups);
            OzoneInputStream is = createInputStream(keyInfo, Function.identity());
            blocks.put(dn, is);
        }
        result.put(keyLocationInfo, blocks);
    }
    return result;
}
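
A hedged usage sketch for the method above: walk the nested map and probe one byte from every datanode's replica, closing each stream. The client variable and the volume/bucket/key names are placeholders:

Map<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> replicas =
        client.getKeysEveryReplicas("vol1", "bucket1", "key1");
for (Map.Entry<OmKeyLocationInfo, Map<DatanodeDetails, OzoneInputStream>> block
        : replicas.entrySet()) {
    for (Map.Entry<DatanodeDetails, OzoneInputStream> replica
            : block.getValue().entrySet()) {
        // One read per replica confirms the copy on that datanode is reachable.
        try (OzoneInputStream in = replica.getValue()) {
            in.read();
        }
    }
}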

Example 5 with Pipeline

Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in the Apache Ozone project.

From the class ReplicatedFileChecksumHelper, the method getChunkInfos:

// copied from BlockInputStream
/**
 * Send RPC call to get the block info from the container.
 * @return List of chunks in this block.
 */
protected List<ContainerProtos.ChunkInfo> getChunkInfos(OmKeyLocationInfo keyLocationInfo) throws IOException {
    // irrespective of the container state, we will always read via Standalone
    // protocol.
    Token<OzoneBlockTokenIdentifier> token = keyLocationInfo.getToken();
    Pipeline pipeline = keyLocationInfo.getPipeline();
    BlockID blockID = keyLocationInfo.getBlockID();
    if (pipeline.getType() != HddsProtos.ReplicationType.STAND_ALONE) {
        pipeline = Pipeline.newBuilder(pipeline)
            .setReplicationConfig(StandaloneReplicationConfig.getInstance(
                ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig())))
            .build();
    }
    boolean success = false;
    List<ContainerProtos.ChunkInfo> chunks;
    XceiverClientSpi xceiverClientSpi = null;
    try {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Initializing BlockInputStream for get key to access {}", blockID.getContainerID());
        }
        xceiverClientSpi = getXceiverClientFactory().acquireClientForReadData(pipeline);
        ContainerProtos.DatanodeBlockID datanodeBlockID = blockID.getDatanodeBlockIDProtobuf();
        ContainerProtos.GetBlockResponseProto response = ContainerProtocolCalls.getBlock(xceiverClientSpi, datanodeBlockID, token);
        chunks = response.getBlockData().getChunksList();
        success = true;
    } finally {
        if (!success && xceiverClientSpi != null) {
            getXceiverClientFactory().releaseClientForReadData(xceiverClientSpi, false);
        }
    }
    return chunks;
}
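
A small hedged follow-up: the returned chunk list is enough to recover the block's data length, since each ContainerProtos.ChunkInfo carries its len field:

// Summing chunk lengths gives the total data size of the block; "chunks" is
// the list returned by getChunkInfos above.
long blockLength = 0;
for (ContainerProtos.ChunkInfo chunk : chunks) {
    blockLength += chunk.getLen();
}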

Aggregations

Classes most often used together with Pipeline across the Ozone sources, with occurrence counts:

Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 180
Test (org.junit.Test): 102
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 65
ArrayList (java.util.ArrayList): 49
ContainerInfo (org.apache.hadoop.hdds.scm.container.ContainerInfo): 48
IOException (java.io.IOException): 43
ContainerWithPipeline (org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline): 37
OmKeyLocationInfo (org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo): 37
BlockID (org.apache.hadoop.hdds.client.BlockID): 35
OzoneOutputStream (org.apache.hadoop.ozone.client.io.OzoneOutputStream): 35
XceiverClientSpi (org.apache.hadoop.hdds.scm.XceiverClientSpi): 30
KeyOutputStream (org.apache.hadoop.ozone.client.io.KeyOutputStream): 27
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 26
MockPipeline (org.apache.hadoop.hdds.scm.pipeline.MockPipeline): 26
OmKeyInfo (org.apache.hadoop.ozone.om.helpers.OmKeyInfo): 25
List (java.util.List): 24
ContainerProtos (org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos): 22
OmKeyArgs (org.apache.hadoop.ozone.om.helpers.OmKeyArgs): 22
HashMap (java.util.HashMap): 18
PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID): 18