
Example 21 with PipelineID

use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.

the class ReconContainerManager method checkContainerStateAndUpdate.

/**
 * Check whether the container is still OPEN even though a replica reports a
 * non-OPEN state. In SCM, the container state changes to CLOSING first, and
 * the close command is then pushed down to the Datanodes. Recon 'learns' the
 * close from the DN, so here the replica state moves the container state to
 * 'CLOSING' as well.
 *
 * @param containerID containerID to check
 * @param state       replica state to compare against
 * @throws Exception if the container state update fails
 */
private void checkContainerStateAndUpdate(ContainerID containerID, ContainerReplicaProto.State state) throws Exception {
    ContainerInfo containerInfo = getContainer(containerID);
    if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)
            && !state.equals(ContainerReplicaProto.State.OPEN)
            && isHealthy(state)) {
        LOG.info("Container {} has state OPEN, but given state is {}.", containerID, state);
        final PipelineID pipelineID = containerInfo.getPipelineID();
        // subtract open container count from the map
        int curCnt = pipelineToOpenContainer.getOrDefault(pipelineID, 0);
        if (curCnt == 1) {
            pipelineToOpenContainer.remove(pipelineID);
        } else if (curCnt > 0) {
            pipelineToOpenContainer.put(pipelineID, curCnt - 1);
        }
        // The FINALIZE event moves the container from OPEN to CLOSING.
        updateContainerState(containerID, FINALIZE);
    }
}
Also used : ContainerInfo(org.apache.hadoop.hdds.scm.container.ContainerInfo) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID)
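
The getOrDefault/remove/put sequence above is a non-atomic read-modify-write. A minimal sketch of the same bookkeeping done in a single atomic step, assuming the map is declared as a ConcurrentHashMap (the snippet does not show the field's declaration, so this is an assumption):

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

class OpenContainerBookkeeping {

    // Assumption: the original field is some Map<PipelineID, Integer>;
    // a ConcurrentHashMap makes the decrement below a single atomic step.
    private final Map<PipelineID, Integer> pipelineToOpenContainer = new ConcurrentHashMap<>();

    // Decrement the open-container count for a pipeline, dropping the entry
    // once the count reaches zero, mirroring the logic in the example above.
    void onContainerNoLongerOpen(PipelineID pipelineID) {
        pipelineToOpenContainer.compute(pipelineID, (id, cnt) -> (cnt == null || cnt <= 1) ? null : cnt - 1);
    }
}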

Example 22 with PipelineID

use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.

the class TestBlockManager method testAllocateBlockWithExclusion.

@Test
public void testAllocateBlockWithExclusion() throws Exception {
    try {
        // Keep creating pipelines until the cluster cannot host another
        // one and createPipeline throws; that failure is expected.
        while (true) {
            pipelineManager.createPipeline(replicationConfig);
        }
    } catch (IOException e) {
        // Expected once no more pipelines can be created.
    }
    HddsTestUtils.openAllRatisPipelines(pipelineManager);
    ExcludeList excludeList = new ExcludeList();
    excludeList.addPipeline(pipelineManager.getPipelines(replicationConfig).get(0).getId());
    AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, excludeList);
    Assert.assertNotNull(block);
    for (PipelineID id : excludeList.getPipelineIds()) {
        Assert.assertNotEquals(block.getPipeline().getId(), id);
    }
    for (Pipeline pipeline : pipelineManager.getPipelines(replicationConfig)) {
        excludeList.addPipeline(pipeline.getId());
    }
    block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE, replicationConfig, OzoneConsts.OZONE, excludeList);
    Assert.assertNotNull(block);
    Assert.assertTrue(excludeList.getPipelineIds().contains(block.getPipeline().getId()));
}
Also used : ExcludeList(org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList) AllocatedBlock(org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) IOException(java.io.IOException) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test)
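
For reference, the ExcludeList calls exercised here and in Example 25 can be combined in one place. A minimal sketch using only the methods that appear in these snippets (including addConatinerId, spelled as in the Ozone source); MockDatanodeDetails.randomDatanodeDetails() is an assumption based on the imports shown in these examples:

import java.util.Collections;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

class ExcludeListSketch {

    // Build an exclusion list covering all three scopes a client can
    // blame for a failure: a pipeline, a container, and datanodes.
    static ExcludeList buildExcludeList(PipelineID failedPipeline) {
        ExcludeList excludeList = new ExcludeList();
        // Skip this pipeline for the next block allocation.
        excludeList.addPipeline(failedPipeline);
        // Skip a specific container (illustrative container id 1).
        excludeList.addConatinerId(ContainerID.valueOf(1L));
        // Skip datanodes that failed earlier writes.
        excludeList.addDatanodes(Collections.singletonList(MockDatanodeDetails.randomDatanodeDetails()));
        return excludeList;
    }
}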

Example 23 with PipelineID

use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.

the class HddsTestUtils method getPipelineActionFromDatanode.

public static PipelineActionsFromDatanode getPipelineActionFromDatanode(DatanodeDetails dn, PipelineID... pipelineIDs) {
    PipelineActionsProto.Builder actionsProtoBuilder = PipelineActionsProto.newBuilder();
    for (PipelineID pipelineID : pipelineIDs) {
        ClosePipelineInfo closePipelineInfo = ClosePipelineInfo.newBuilder()
                .setPipelineID(pipelineID.getProtobuf())
                .setReason(ClosePipelineInfo.Reason.PIPELINE_FAILED)
                .setDetailedReason("")
                .build();
        actionsProtoBuilder.addPipelineActions(PipelineAction.newBuilder()
                .setClosePipeline(closePipelineInfo)
                .setAction(PipelineAction.Action.CLOSE)
                .build());
    }
    return new PipelineActionsFromDatanode(dn, actionsProtoBuilder.build());
}
Also used : ClosePipelineInfo(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo) PipelineActionsFromDatanode(org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) PipelineActionsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineActionsProto)
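
A hedged usage sketch for this helper, simulating a datanode reporting a failed pipeline. MockDatanodeDetails.randomDatanodeDetails() is an assumption based on the imports shown in these examples, and HddsTestUtils is assumed to be on the classpath (its package is not shown above):

import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;

class PipelineActionSketch {

    // Build a CLOSE action report from a random datanode for a fresh
    // pipeline id; one CLOSE action is generated per id passed in.
    static PipelineActionsFromDatanode closeActionForRandomPipeline() {
        DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
        return HddsTestUtils.getPipelineActionFromDatanode(dn, PipelineID.randomId());
    }
}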

Example 24 with PipelineID

use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.

the class TestCreatePipelineCommandHandler method testPipelineCreation.

@Test
public void testPipelineCreation() throws IOException {
    final List<DatanodeDetails> datanodes = getDatanodes();
    final PipelineID pipelineID = PipelineID.randomId();
    final SCMCommand<CreatePipelineCommandProto> command = new CreatePipelineCommand(pipelineID, HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE, datanodes);
    final XceiverServerSpi writeChannel = Mockito.mock(XceiverServerSpi.class);
    final DatanodeStateMachine dnsm = Mockito.mock(DatanodeStateMachine.class);
    Mockito.when(stateContext.getParent()).thenReturn(dnsm);
    Mockito.when(dnsm.getDatanodeDetails()).thenReturn(datanodes.get(0));
    Mockito.when(ozoneContainer.getWriteChannel()).thenReturn(writeChannel);
    Mockito.when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(false);
    final CreatePipelineCommandHandler commandHandler = new CreatePipelineCommandHandler(new OzoneConfiguration());
    commandHandler.handle(command, ozoneContainer, stateContext, connectionManager);
    List<Integer> priorityList = new ArrayList<>(Collections.nCopies(datanodes.size(), 0));
    Mockito.verify(writeChannel, Mockito.times(1)).addGroup(pipelineID.getProtobuf(), datanodes, priorityList);
    Mockito.verify(raftClientGroupManager, Mockito.times(2)).add(Mockito.any(RaftGroup.class));
}
Also used : ArrayList(java.util.ArrayList) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) CreatePipelineCommand(org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand) RaftGroup(org.apache.ratis.protocol.RaftGroup) XceiverServerSpi(org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) CreatePipelineCommandProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto) DatanodeStateMachine(org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine) PrepareForTest(org.powermock.core.classloader.annotations.PrepareForTest) Test(org.junit.Test)
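
The test above stubs isExist(...) to return false before invoking the handler, which suggests the handler consults it before creating the Raft group. A hedged companion test for the opposite path, assuming the setup from testPipelineCreation is extracted into shared fields (the skip-when-exists behavior is an assumption, not verified here):

@Test
public void testPipelineCreationSkippedWhenGroupExists() throws IOException {
    // Same mocks and command as above, but the write channel reports
    // that the Raft group for this pipeline already exists.
    Mockito.when(writeChannel.isExist(pipelineID.getProtobuf())).thenReturn(true);
    commandHandler.handle(command, ozoneContainer, stateContext, connectionManager);
    // Assumption: an existing group means no new Raft group is added.
    Mockito.verify(writeChannel, Mockito.never()).addGroup(Mockito.any(), Mockito.anyList(), Mockito.anyList());
}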

Example 25 with PipelineID

use of org.apache.hadoop.hdds.scm.pipeline.PipelineID in project ozone by apache.

the class KeyOutputStream method handleException.

/**
 * Performs the following actions:
 * a. Updates the committed length for the current stream at the datanode.
 * b. Reads the data from the underlying buffer and writes it to the next
 * stream.
 *
 * @param streamEntry StreamEntry
 * @param exception   actual exception that occurred
 * @throws IOException if the write fails
 */
private void handleException(BlockOutputStreamEntry streamEntry, IOException exception) throws IOException {
    Throwable t = HddsClientUtils.checkForException(exception);
    Preconditions.checkNotNull(t);
    boolean retryFailure = checkForRetryFailure(t);
    boolean containerExclusionException = false;
    if (!retryFailure) {
        containerExclusionException = checkIfContainerToExclude(t);
    }
    Pipeline pipeline = streamEntry.getPipeline();
    PipelineID pipelineId = pipeline.getId();
    long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
    streamEntry.resetToAckedPosition();
    long bufferedDataLen = blockOutputStreamEntryPool.computeBufferData();
    if (containerExclusionException) {
        LOG.debug("Encountered exception {}. The last committed block length is {}, " + "uncommitted data length is {} retry count {}", exception, totalSuccessfulFlushedData, bufferedDataLen, retryCount);
    } else {
        LOG.warn("Encountered exception {} on the pipeline {}. " + "The last committed block length is {}, " + "uncommitted data length is {} retry count {}", exception, pipeline, totalSuccessfulFlushedData, bufferedDataLen, retryCount);
    }
    Preconditions.checkArgument(bufferedDataLen <= config.getStreamBufferMaxSize());
    Preconditions.checkArgument(offset - blockOutputStreamEntryPool.getKeyLength() == bufferedDataLen);
    long containerId = streamEntry.getBlockID().getContainerID();
    Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
    Preconditions.checkNotNull(failedServers);
    ExcludeList excludeList = blockOutputStreamEntryPool.getExcludeList();
    if (!failedServers.isEmpty()) {
        excludeList.addDatanodes(failedServers);
    }
    // If the container needs to be excluded, add the container to the
    // exclusion list; otherwise add the pipeline to the exclusion list.
    if (containerExclusionException) {
        excludeList.addConatinerId(ContainerID.valueOf(containerId));
    } else {
        excludeList.addPipeline(pipelineId);
    }
    // just clean up the current stream.
    streamEntry.cleanup(retryFailure);
    // Discard pre-allocated blocks so that the next retry never writes data
    // on the closed container/pipeline.
    if (containerExclusionException) {
        // Discard subsequent pre-allocated blocks in the streamEntries list
        // that belong to the closed container.
        blockOutputStreamEntryPool.discardPreallocatedBlocks(streamEntry.getBlockID().getContainerID(), null);
    } else {
        // In case of a timeout exception, a watch-for-commit that succeeded
        // only over a majority, or a client connection failure to the leader
        // of the pipeline, discard all pre-allocated blocks on this pipeline.
        // The next block allocation will exclude this specific pipeline,
        // ensuring that a 2-way commit cannot span multiple blocks.
        blockOutputStreamEntryPool.discardPreallocatedBlocks(-1, pipelineId);
    }
    if (bufferedDataLen > 0) {
        // If the data is still cached in the underlying stream, we need to
        // allocate new block and write this data in the datanode.
        handleRetry(exception, bufferedDataLen);
        // reset the retryCount after handling the exception
        retryCount = 0;
    }
}
Also used : ExcludeList(org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline)
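
The container-vs-pipeline decision above is the heart of the exclusion logic: a container-scoped failure blames only that container, while any other failure blames the whole pipeline. A minimal sketch isolating that branch, using only calls that appear in the snippet (including addConatinerId, spelled as in the Ozone source):

import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

final class ExclusionPolicy {

    private ExclusionPolicy() {
    }

    // Mirrors the branch in handleException: exclude the container for a
    // container-scoped failure, otherwise exclude the whole pipeline.
    static void exclude(ExcludeList excludeList, boolean containerExclusion, long containerId, PipelineID pipelineId) {
        if (containerExclusion) {
            excludeList.addConatinerId(ContainerID.valueOf(containerId));
        } else {
            excludeList.addPipeline(pipelineId);
        }
    }
}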

Aggregations

PipelineID (org.apache.hadoop.hdds.scm.pipeline.PipelineID): 35 uses
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 15 uses
Test (org.junit.Test): 13 uses
IOException (java.io.IOException): 12 uses
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 11 uses
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 6 uses
PipelineNotFoundException (org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException): 6 uses
XceiverServerSpi (org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi): 5 uses
ArrayList (java.util.ArrayList): 4 uses
HashMap (java.util.HashMap): 4 uses
UUID (java.util.UUID): 4 uses
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 4 uses
List (java.util.List): 3 uses
ContainerID (org.apache.hadoop.hdds.scm.container.ContainerID): 3 uses
RaftGroup (org.apache.ratis.protocol.RaftGroup): 3 uses
VisibleForTesting (com.google.common.annotations.VisibleForTesting): 2 uses
ConfigurationSource (org.apache.hadoop.hdds.conf.ConfigurationSource): 2 uses
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails): 2 uses
ClosePipelineInfo (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo): 2 uses
CreatePipelineCommandProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto): 2 uses