Search in sources :

Example 1 with SCMCommand

Use of org.apache.hadoop.ozone.protocol.commands.SCMCommand in the Apache Ozone project.

Class PipelineReportHandler, method processPipelineReport:

protected void processPipelineReport(PipelineReport report, DatanodeDetails dn, EventPublisher publisher) throws IOException {
    // Resolve the pipeline this datanode is reporting on.
    final PipelineID reportedId = PipelineID.getFromProtobuf(report.getPipelineID());
    final Pipeline pipeline;
    try {
        pipeline = pipelineManager.getPipeline(reportedId);
    } catch (PipelineNotFoundException e) {
        // Unknown pipeline: if this SCM is the leader, instruct the datanode
        // to close it. Followers stay silent to avoid duplicate commands.
        if (scmContext.isLeader()) {
            LOGGER.info("Reported pipeline {} is not found", reportedId);
            SCMCommand<?> closeCommand = new ClosePipelineCommand(reportedId);
            closeCommand.setTerm(scmContext.getTermOfLeader());
            publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(dn.getUuid(), closeCommand));
        }
        return;
    }
    // Record which datanode reported and, if present, the pipeline leader.
    setReportedDatanode(pipeline, dn);
    setPipelineLeaderId(report, pipeline, dn);
    if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
        if (LOGGER.isDebugEnabled()) {
            LOGGER.debug("Pipeline {} {} reported by {}", pipeline.getReplicationConfig(), pipeline.getId(), dn);
        }
        // Promote a freshly allocated pipeline to OPEN once healthy.
        if (pipeline.isHealthy()) {
            pipelineManager.openPipeline(reportedId);
        }
    }
    // NOTE(review): isHealthy() is deliberately re-evaluated after the
    // potential openPipeline() call, mirroring the original ordering.
    if (pipeline.isHealthy() && pipelineAvailabilityCheck && scmSafeModeManager.getInSafeMode()) {
        // While in safe mode, a healthy pipeline report feeds the
        // pipeline-availability exit rule.
        publisher.fireEvent(SCMEvents.OPEN_PIPELINE, pipeline);
    }
}
Also used : CommandForDatanode(org.apache.hadoop.ozone.protocol.commands.CommandForDatanode) ClosePipelineCommand(org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand)

Example 2 with SCMCommand

Use of org.apache.hadoop.ozone.protocol.commands.SCMCommand in the Apache Ozone project.

Class SCMDatanodeProtocolServer, method sendHeartbeat:

@Override
public SCMHeartbeatResponseProto sendHeartbeat(SCMHeartbeatRequestProto heartbeat) throws IOException {
    // Dispatch the heartbeat to the registered report handlers and collect
    // the commands that should be returned to the reporting datanode.
    List<SCMCommandProto> cmdResponses = new ArrayList<>();
    // Fixed: use the wildcard-parameterized SCMCommand<?> instead of the raw
    // type, which suppressed generic type checking.
    for (SCMCommand<?> cmd : heartbeatDispatcher.dispatch(heartbeat)) {
        cmdResponses.add(getCommandResponse(cmd));
    }
    boolean auditSuccess = true;
    Map<String, String> auditMap = Maps.newHashMap();
    auditMap.put("datanodeUUID", heartbeat.getDatanodeDetails().getUuid());
    auditMap.put("command", flatten(cmdResponses.toString()));
    try {
        // Echo the datanode UUID back with the assembled command list.
        return SCMHeartbeatResponseProto.newBuilder().setDatanodeUUID(heartbeat.getDatanodeDetails().getUuid()).addAllCommands(cmdResponses).build();
    } catch (Exception ex) {
        // Audit the failure, then rethrow so the RPC layer sees it.
        auditSuccess = false;
        AUDIT.logWriteFailure(buildAuditMessageForFailure(SCMAction.SEND_HEARTBEAT, auditMap, ex));
        throw ex;
    } finally {
        if (auditSuccess) {
            AUDIT.logWriteSuccess(buildAuditMessageForSuccess(SCMAction.SEND_HEARTBEAT, auditMap));
        }
    }
}
Also used : SCMCommandProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto) ArrayList(java.util.ArrayList) IOException(java.io.IOException) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand)

Example 3 with SCMCommand

use of org.apache.hadoop.ozone.protocol.commands.SCMCommand in project ozone by apache.

Class PipelineActionHandler, method processPipelineAction:

/**
 * Process the given PipelineAction.
 *
 * @param datanode the datanode which has sent the PipelineAction
 * @param pipelineAction the PipelineAction
 * @param publisher EventPublisher to fire new events if required
 */
private void processPipelineAction(final DatanodeDetails datanode, final PipelineAction pipelineAction, final EventPublisher publisher) {
    final ClosePipelineInfo closeInfo = pipelineAction.getClosePipeline();
    final PipelineAction.Action actionType = pipelineAction.getAction();
    final PipelineID pipelineId = PipelineID.getFromProtobuf(closeInfo.getPipelineID());
    try {
        LOG.info("Received pipeline action {} for {} from datanode {}. Reason : {}", actionType, pipelineId, datanode.getUuidString(), closeInfo.getDetailedReason());
        // CLOSE is the only action currently defined; anything else is logged
        // and dropped.
        if (actionType != PipelineAction.Action.CLOSE) {
            LOG.error("unknown pipeline action:{}", actionType);
        } else {
            pipelineManager.closePipeline(pipelineManager.getPipeline(pipelineId), false);
        }
    } catch (PipelineNotFoundException e) {
        // SCM does not know this pipeline: send the datanode an explicit
        // close command so it can clean up, but only if we are still leader.
        LOG.warn("Pipeline action {} received for unknown pipeline {}, firing close pipeline event.", actionType, pipelineId);
        final SCMCommand<?> closeCommand = new ClosePipelineCommand(pipelineId);
        try {
            closeCommand.setTerm(scmContext.getTermOfLeader());
        } catch (NotLeaderException nle) {
            LOG.warn("Skip sending ClosePipelineCommand for pipeline {}, since not leader SCM.", pipelineId);
            return;
        }
        publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(datanode.getUuid(), closeCommand));
    } catch (IOException ioe) {
        // Best effort: log and continue processing other actions.
        LOG.error("Could not execute pipeline action={} pipeline={}", actionType, pipelineId, ioe);
    }
}
Also used : CommandForDatanode(org.apache.hadoop.ozone.protocol.commands.CommandForDatanode) NotLeaderException(org.apache.ratis.protocol.exceptions.NotLeaderException) ClosePipelineInfo(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo) ClosePipelineCommand(org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand) IOException(java.io.IOException) PipelineAction(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand)

Example 4 with SCMCommand

use of org.apache.hadoop.ozone.protocol.commands.SCMCommand in project ozone by apache.

Class TestSCMNodeManager, method testHandlingSCMCommandEvent:

@Test
public void testHandlingSCMCommandEvent() throws IOException, AuthenticationException {
    OzoneConfiguration conf = getConf();
    // Fixed: the original called getTimeDuration(...) and discarded the
    // result, so the heartbeat process interval was never actually set.
    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS);
    DatanodeDetails datanodeDetails = randomDatanodeDetails();
    UUID dnId = datanodeDetails.getUuid();
    String storagePath = testDir.getAbsolutePath() + "/" + dnId;
    StorageReportProto report = HddsTestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
    EventQueue eq = new EventQueue();
    try (SCMNodeManager nodemanager = createNodeManager(conf)) {
        eq.addHandler(DATANODE_COMMAND, nodemanager);
        nodemanager.register(datanodeDetails, HddsTestUtils.createNodeReport(Arrays.asList(report), Collections.emptyList()), HddsTestUtils.getRandomPipelineReports());
        // Queue a close-container command for this datanode, then drain the
        // event queue so the node manager records it.
        eq.fireEvent(DATANODE_COMMAND, new CommandForDatanode<>(datanodeDetails.getUuid(), new CloseContainerCommand(1L, PipelineID.randomId())));
        LayoutVersionManager versionManager = nodemanager.getLayoutVersionManager();
        LayoutVersionProto layoutInfo = toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion());
        eq.processAll(1000L);
        List<SCMCommand> command = nodemanager.processHeartbeat(datanodeDetails, layoutInfo);
        // With dn registered, SCM may also send a create pipeline command,
        // so the close-container command is not necessarily at index 0.
        Assert.assertTrue(command.size() >= 1);
        // Fixed: the original indexed command.get(1), which throws
        // IndexOutOfBoundsException when exactly one command is returned and
        // it is not a CloseContainerCommand. Scan the whole list instead.
        Assert.assertTrue(command.stream().anyMatch(c -> c.getClass().equals(CloseContainerCommand.class)));
    }
    // The previous catch block only printed the stack trace and rethrew;
    // letting the IOException propagate gives JUnit the same failure with
    // less noise.
}
Also used : UpgradeUtils.toLayoutVersionProto(org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto) LayoutVersionProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) CloseContainerCommand(org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) MetadataStorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto) StorageReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto) IOException(java.io.IOException) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) MockDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails) MockDatanodeDetails.createDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) LayoutVersionManager(org.apache.hadoop.ozone.upgrade.LayoutVersionManager) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) UUID(java.util.UUID) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand) Test(org.junit.Test)

Example 5 with SCMCommand

use of org.apache.hadoop.ozone.protocol.commands.SCMCommand in project ozone by apache.

Class ReconNodeManager, method processHeartbeat:

/**
 * Send heartbeat to indicate the datanode is alive and doing well.
 *
 * @param datanodeDetails - DatanodeDetailsProto.
 * @param layoutInfo - Layout Version Proto
 * @return SCMheartbeat response.
 */
@Override
public List<SCMCommand> processHeartbeat(DatanodeDetails datanodeDetails, LayoutVersionProto layoutInfo) {
    List<SCMCommand> cmds = new ArrayList<>();
    // Fixed: capture the timestamp once and reuse it. The original called
    // Time.now() again for each map update, so the staleness check and the
    // recorded heartbeat time could disagree by a few milliseconds.
    long currentTime = Time.now();
    if (needUpdate(datanodeDetails, currentTime)) {
        // Datanode is stale or unknown to Recon: ask it to re-register
        // before accepting further reports.
        cmds.add(new ReregisterCommand());
        LOG.info("Sending ReregisterCommand() for " + datanodeDetails.getHostName());
        datanodeHeartbeatMap.put(datanodeDetails.getUuid(), currentTime);
        return cmds;
    }
    // Update heartbeat map with current time
    datanodeHeartbeatMap.put(datanodeDetails.getUuid(), currentTime);
    cmds.addAll(super.processHeartbeat(datanodeDetails, layoutInfo));
    // Recon only understands a subset of SCM commands; filter out the rest.
    return cmds.stream().filter(c -> ALLOWED_COMMANDS.contains(c.getType())).collect(toList());
}
Also used : OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Type(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) SCMStorageConfig(org.apache.hadoop.hdds.scm.server.SCMStorageConfig) NodeReportProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto) LoggerFactory(org.slf4j.LoggerFactory) HashMap(java.util.HashMap) SCMContext(org.apache.hadoop.hdds.scm.ha.SCMContext) ArrayList(java.util.ArrayList) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) Map(java.util.Map) PipelineReportsProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) ImmutableSet(com.google.common.collect.ImmutableSet) Logger(org.slf4j.Logger) LayoutVersionProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto) Set(java.util.Set) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) RegisteredCommand(org.apache.hadoop.ozone.protocol.commands.RegisteredCommand) IOException(java.io.IOException) UUID(java.util.UUID) CommandForDatanode(org.apache.hadoop.ozone.protocol.commands.CommandForDatanode) ReregisterCommand(org.apache.hadoop.ozone.protocol.commands.ReregisterCommand) Type.reregisterCommand(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type.reregisterCommand) List(java.util.List) Collectors.toList(java.util.stream.Collectors.toList) VersionResponse(org.apache.hadoop.ozone.protocol.VersionResponse) Table(org.apache.hadoop.hdds.utils.db.Table) HddsServerUtil(org.apache.hadoop.hdds.utils.HddsServerUtil) 
SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) Time(org.apache.hadoop.util.Time) VisibleForTesting(com.google.common.annotations.VisibleForTesting) TableIterator(org.apache.hadoop.hdds.utils.db.TableIterator) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand) SCMVersionRequestProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto) ReregisterCommand(org.apache.hadoop.ozone.protocol.commands.ReregisterCommand) ArrayList(java.util.ArrayList) SCMCommand(org.apache.hadoop.ozone.protocol.commands.SCMCommand)

Aggregations

SCMCommand (org.apache.hadoop.ozone.protocol.commands.SCMCommand)12 DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails)7 IOException (java.io.IOException)5 UUID (java.util.UUID)5 OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration)4 MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails)4 LayoutVersionProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto)4 Test (org.junit.Test)4 NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology)3 EventQueue (org.apache.hadoop.hdds.server.events.EventQueue)3 HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager)3 CommandForDatanode (org.apache.hadoop.ozone.protocol.commands.CommandForDatanode)3 ArrayList (java.util.ArrayList)2 List (java.util.List)2 MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails)2 MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails)2 HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos)2 SCMCommandProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto)2 NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl)2 UpgradeUtils.toLayoutVersionProto (org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.toLayoutVersionProto)2