Use of org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand in project ozone by apache.
The class ClosePipelineCommandHandler, method handle.
/**
 * Handles a given SCM command.
 *
 * @param command - SCM Command
 * @param ozoneContainer - Ozone Container.
 * @param context - Current Context.
 * @param connectionManager - The SCMs that we are talking to.
 */
@Override
public void handle(SCMCommand command, OzoneContainer ozoneContainer, StateContext context, SCMConnectionManager connectionManager) {
  invocationCount.incrementAndGet();
  final long startTime = Time.monotonicNow();
  final DatanodeDetails dn = context.getParent().getDatanodeDetails();
  ClosePipelineCommand closePipelineCommand = (ClosePipelineCommand) command;
  final PipelineID pipelineID = closePipelineCommand.getPipelineID();
  final HddsProtos.PipelineID pipelineIdProto = pipelineID.getProtobuf();
  try {
    XceiverServerSpi server = ozoneContainer.getWriteChannel();
    if (server.isExist(pipelineIdProto)) {
      server.removeGroup(pipelineIdProto);
      LOG.info("Close Pipeline {} command on datanode {}.", pipelineID, dn.getUuidString());
    } else {
      LOG.debug("Ignoring close pipeline command for pipeline {} as it does not exist", pipelineID);
    }
  } catch (IOException e) {
    LOG.error("Can't close pipeline {}", pipelineID, e);
  } finally {
    long endTime = Time.monotonicNow();
    totalTime += endTime - startTime;
  }
}
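For orientation, here is a minimal sketch (not Ozone code) of the write-channel interaction the handler performs: check whether the Raft group backing the pipeline still exists on this datanode and, if so, remove it. The XceiverServerSpi, PipelineID and HddsProtos types are the ones used above; the class name is illustrative and the import paths are assumed from the usual Ozone module layout.

import java.io.IOException;

import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;

public final class ClosePipelineSketch {

  private ClosePipelineSketch() {
  }

  /**
   * Removes the Raft group backing the given pipeline from the datanode's
   * write channel, if it is still present there.
   *
   * @return true if a group was removed, false if the pipeline was unknown here.
   */
  public static boolean closeIfPresent(XceiverServerSpi writeChannel, PipelineID pipelineID) throws IOException {
    final HddsProtos.PipelineID proto = pipelineID.getProtobuf();
    if (!writeChannel.isExist(proto)) {
      // Already gone (or never created on this datanode): nothing to do.
      return false;
    }
    writeChannel.removeGroup(proto);
    return true;
  }
}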
Use of org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand in project ozone by apache.
The class PipelineReportHandler, method processPipelineReport.
protected void processPipelineReport(PipelineReport report, DatanodeDetails dn, EventPublisher publisher) throws IOException {
  PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID());
  Pipeline pipeline;
  try {
    pipeline = pipelineManager.getPipeline(pipelineID);
  } catch (PipelineNotFoundException e) {
    if (scmContext.isLeader()) {
      LOGGER.info("Reported pipeline {} is not found", pipelineID);
      SCMCommand<?> command = new ClosePipelineCommand(pipelineID);
      command.setTerm(scmContext.getTermOfLeader());
      publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(dn.getUuid(), command));
    }
    return;
  }
  setReportedDatanode(pipeline, dn);
  setPipelineLeaderId(report, pipeline, dn);
  if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
    if (LOGGER.isDebugEnabled()) {
      LOGGER.debug("Pipeline {} {} reported by {}", pipeline.getReplicationConfig(), pipeline.getId(), dn);
    }
    if (pipeline.isHealthy()) {
      pipelineManager.openPipeline(pipelineID);
    }
  }
  if (pipeline.isHealthy()) {
    if (pipelineAvailabilityCheck && scmSafeModeManager.getInSafeMode()) {
      publisher.fireEvent(SCMEvents.OPEN_PIPELINE, pipeline);
    }
  }
}
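The branch worth noting is the PipelineNotFoundException path: when the leader SCM receives a report for a pipeline it no longer tracks, it answers with a ClosePipelineCommand so the reporting datanode tears the pipeline down. Below is a hedged sketch of that reaction, factored into a helper that could live in PipelineReportHandler; it reuses the class's fields, imports and the types shown above, and the helper name itself is illustrative.

// Illustrative helper, not an Ozone API: tell the reporting datanode to close
// a pipeline the SCM no longer knows about. Only the leader issues commands,
// and the command carries the leader term so stale commands can be dropped.
private void closeUnknownPipeline(PipelineID pipelineID, DatanodeDetails dn, EventPublisher publisher) throws NotLeaderException {
  if (!scmContext.isLeader()) {
    return; // a follower SCM never issues datanode commands
  }
  final SCMCommand<?> command = new ClosePipelineCommand(pipelineID);
  command.setTerm(scmContext.getTermOfLeader());
  publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(dn.getUuid(), command));
}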
Use of org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand in project ozone by apache.
The class PipelineActionHandler, method processPipelineAction.
/**
 * Process the given PipelineAction.
 *
 * @param datanode the datanode which has sent the PipelineAction
 * @param pipelineAction the PipelineAction
 * @param publisher EventPublisher to fire new events if required
 */
private void processPipelineAction(final DatanodeDetails datanode, final PipelineAction pipelineAction, final EventPublisher publisher) {
  final ClosePipelineInfo info = pipelineAction.getClosePipeline();
  final PipelineAction.Action action = pipelineAction.getAction();
  final PipelineID pid = PipelineID.getFromProtobuf(info.getPipelineID());
  try {
    LOG.info("Received pipeline action {} for {} from datanode {}. Reason : {}", action, pid, datanode.getUuidString(), info.getDetailedReason());
    if (action == PipelineAction.Action.CLOSE) {
      pipelineManager.closePipeline(pipelineManager.getPipeline(pid), false);
    } else {
      LOG.error("unknown pipeline action:{}", action);
    }
  } catch (PipelineNotFoundException e) {
    LOG.warn("Pipeline action {} received for unknown pipeline {}, firing close pipeline event.", action, pid);
    SCMCommand<?> command = new ClosePipelineCommand(pid);
    try {
      command.setTerm(scmContext.getTermOfLeader());
    } catch (NotLeaderException nle) {
      LOG.warn("Skip sending ClosePipelineCommand for pipeline {}, since not leader SCM.", pid);
      return;
    }
    publisher.fireEvent(SCMEvents.DATANODE_COMMAND, new CommandForDatanode<>(datanode.getUuid(), command));
  } catch (IOException ioe) {
    LOG.error("Could not execute pipeline action={} pipeline={}", action, pid, ioe);
  }
}
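Both this handler and PipelineReportHandler follow the same pattern before sending a ClosePipelineCommand: stamp it with the current leader term, and give up quietly if this SCM is no longer the leader. A hedged sketch of that pattern as a small reusable utility follows; the class and method names are illustrative, and the SCMContext and NotLeaderException import paths are assumptions based on the usual Ozone/Ratis packages.

import java.util.Optional;
import java.util.UUID;

import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.ratis.protocol.exceptions.NotLeaderException;

/**
 * Sketch of the shared pattern: stamp a command with the current leader term,
 * or give up if this SCM is no longer the leader.
 */
final class LeaderTermStamper {

  private LeaderTermStamper() {
  }

  static Optional<CommandForDatanode<?>> stamp(SCMContext scmContext, UUID datanodeUuid, SCMCommand<?> command) {
    try {
      command.setTerm(scmContext.getTermOfLeader());
    } catch (NotLeaderException e) {
      // Mirrors processPipelineAction above: a follower SCM skips the send.
      return Optional.empty();
    }
    final CommandForDatanode<?> wrapped = new CommandForDatanode<>(datanodeUuid, command);
    return Optional.of(wrapped);
  }
}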
Use of org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand in project ozone by apache.
The class HeartbeatEndpointTask, method processResponse.
/**
 * Adds each command in the SCM heartbeat response to the command processing queue.
 *
 * @param response - SCMHeartbeat response.
 * @param datanodeDetails - DatanodeDetails of the datanode that sent the heartbeat.
 */
private void processResponse(SCMHeartbeatResponseProto response, final DatanodeDetailsProto datanodeDetails) {
  // Verify the response is indeed for this datanode.
  Preconditions.checkState(response.getDatanodeUUID().equalsIgnoreCase(datanodeDetails.getUuid()), "Unexpected datanode ID in the response.");
  for (SCMCommandProto commandResponseProto : response.getCommandsList()) {
    switch (commandResponseProto.getCommandType()) {
    case reregisterCommand:
      if (rpcEndpoint.getState() == EndPointStates.HEARTBEAT) {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Received SCM notification to register. Interrupt HEARTBEAT and transit to REGISTER state.");
        }
        rpcEndpoint.setState(EndPointStates.REGISTER);
      } else {
        if (LOG.isDebugEnabled()) {
          LOG.debug("Illegal state {} found, expecting {}.", rpcEndpoint.getState().name(), EndPointStates.HEARTBEAT);
        }
      }
      break;
    case deleteBlocksCommand:
      DeleteBlocksCommand deleteBlocksCommand = DeleteBlocksCommand.getFromProtobuf(commandResponseProto.getDeleteBlocksCommandProto());
      if (commandResponseProto.hasTerm()) {
        deleteBlocksCommand.setTerm(commandResponseProto.getTerm());
      }
      if (!deleteBlocksCommand.blocksTobeDeleted().isEmpty()) {
        if (LOG.isDebugEnabled()) {
          LOG.debug(DeletedContainerBlocksSummary.getFrom(deleteBlocksCommand.blocksTobeDeleted()).toString());
        }
        this.context.addCommand(deleteBlocksCommand);
      }
      break;
    case closeContainerCommand:
      CloseContainerCommand closeContainer = CloseContainerCommand.getFromProtobuf(commandResponseProto.getCloseContainerCommandProto());
      if (commandResponseProto.hasTerm()) {
        closeContainer.setTerm(commandResponseProto.getTerm());
      }
      if (commandResponseProto.hasEncodedToken()) {
        closeContainer.setEncodedToken(commandResponseProto.getEncodedToken());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM container close request for container {}", closeContainer.getContainerID());
      }
      this.context.addCommand(closeContainer);
      break;
    case replicateContainerCommand:
      ReplicateContainerCommand replicateContainerCommand = ReplicateContainerCommand.getFromProtobuf(commandResponseProto.getReplicateContainerCommandProto());
      if (commandResponseProto.hasTerm()) {
        replicateContainerCommand.setTerm(commandResponseProto.getTerm());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM container replicate request for container {}", replicateContainerCommand.getContainerID());
      }
      this.context.addCommand(replicateContainerCommand);
      break;
    case deleteContainerCommand:
      DeleteContainerCommand deleteContainerCommand = DeleteContainerCommand.getFromProtobuf(commandResponseProto.getDeleteContainerCommandProto());
      if (commandResponseProto.hasTerm()) {
        deleteContainerCommand.setTerm(commandResponseProto.getTerm());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM delete container request for container {}", deleteContainerCommand.getContainerID());
      }
      this.context.addCommand(deleteContainerCommand);
      break;
    case createPipelineCommand:
      CreatePipelineCommand createPipelineCommand = CreatePipelineCommand.getFromProtobuf(commandResponseProto.getCreatePipelineCommandProto());
      if (commandResponseProto.hasTerm()) {
        createPipelineCommand.setTerm(commandResponseProto.getTerm());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM create pipeline request {}", createPipelineCommand.getPipelineID());
      }
      this.context.addCommand(createPipelineCommand);
      break;
    case closePipelineCommand:
      ClosePipelineCommand closePipelineCommand = ClosePipelineCommand.getFromProtobuf(commandResponseProto.getClosePipelineCommandProto());
      if (commandResponseProto.hasTerm()) {
        closePipelineCommand.setTerm(commandResponseProto.getTerm());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM close pipeline request {}", closePipelineCommand.getPipelineID());
      }
      this.context.addCommand(closePipelineCommand);
      break;
    case setNodeOperationalStateCommand:
      SetNodeOperationalStateCommand setNodeOperationalStateCommand = SetNodeOperationalStateCommand.getFromProtobuf(commandResponseProto.getSetNodeOperationalStateCommandProto());
      if (commandResponseProto.hasTerm()) {
        setNodeOperationalStateCommand.setTerm(commandResponseProto.getTerm());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM set operational state command. State: {} Expiry: {}", setNodeOperationalStateCommand.getOpState(), setNodeOperationalStateCommand.getStateExpiryEpochSeconds());
      }
      this.context.addCommand(setNodeOperationalStateCommand);
      break;
    case finalizeNewLayoutVersionCommand:
      FinalizeNewLayoutVersionCommand finalizeNewLayoutVersionCommand = FinalizeNewLayoutVersionCommand.getFromProtobuf(commandResponseProto.getFinalizeNewLayoutVersionCommandProto());
      if (commandResponseProto.hasTerm()) {
        finalizeNewLayoutVersionCommand.setTerm(commandResponseProto.getTerm());
      }
      if (LOG.isDebugEnabled()) {
        LOG.debug("Received SCM finalize command {}", finalizeNewLayoutVersionCommand.getId());
      }
      this.context.addCommand(finalizeNewLayoutVersionCommand);
      break;
    case refreshVolumeUsageInfo:
      RefreshVolumeUsageCommand refreshVolumeUsageCommand = RefreshVolumeUsageCommand.getFromProtobuf(commandResponseProto.getRefreshVolumeUsageCommandProto());
      if (commandResponseProto.hasTerm()) {
        refreshVolumeUsageCommand.setTerm(commandResponseProto.getTerm());
      }
      this.context.addCommand(refreshVolumeUsageCommand);
      break;
    default:
      throw new IllegalArgumentException("Unknown response : " + commandResponseProto.getCommandType().name());
    }
  }
}
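Every case in the switch above repeats the same three steps: decode the command-specific protobuf, copy the leader term when the response carries one, and queue the command on the state context. A hedged sketch of that shared pattern as a helper method is shown below; the helper would sit in HeartbeatEndpointTask and rely on the same fields and imports as processResponse, and its name is hypothetical (Ozone itself keeps the explicit per-command switch).

// Illustrative only: the stamp-and-queue steps common to each case above,
// written once. 'context' and SCMCommandProto are the names already used in
// processResponse; the helper name is hypothetical.
private void stampAndQueue(SCMCommand<?> command, SCMCommandProto commandResponseProto) {
  if (commandResponseProto.hasTerm()) {
    // The term lets the datanode discard commands issued by a stale SCM leader.
    command.setTerm(commandResponseProto.getTerm());
  }
  this.context.addCommand(command);
}

// Example use for the close pipeline case:
//   stampAndQueue(ClosePipelineCommand.getFromProtobuf(commandResponseProto.getClosePipelineCommandProto()), commandResponseProto);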
Use of org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand in project ozone by apache.
The class RatisPipelineProvider, method close.
/**
 * Removes the pipeline from SCM and sends a command to destroy the pipeline
 * on all of its datanodes.
 *
 * @param pipeline - Pipeline to be destroyed
 * @throws NotLeaderException - if the datanode command is sent while this SCM is not the leader
 */
@Override
public void close(Pipeline pipeline) throws NotLeaderException {
  final ClosePipelineCommand closeCommand = new ClosePipelineCommand(pipeline.getId());
  closeCommand.setTerm(scmContext.getTermOfLeader());
  pipeline.getNodes().forEach(node -> {
    final CommandForDatanode<?> datanodeCommand = new CommandForDatanode<>(node.getUuid(), closeCommand);
    LOG.info("Send pipeline:{} close command to datanode {}", pipeline.getId(), datanodeCommand.getDatanodeId());
    eventPublisher.fireEvent(SCMEvents.DATANODE_COMMAND, datanodeCommand);
  });
}
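A hedged usage sketch: how SCM-side code holding a RatisPipelineProvider might drive this close path. The 'provider', 'pipeline' and 'LOG' variables are assumed to exist in the caller's scope; the only behaviour taken from the snippet above is that close() sends one ClosePipelineCommand per pipeline member and throws NotLeaderException if this SCM is not the leader.

// Illustrative caller, not Ozone code: close a pipeline and tolerate losing
// SCM leadership mid-flight.
try {
  // Fires SCMEvents.DATANODE_COMMAND with a term-stamped ClosePipelineCommand
  // for every datanode in the pipeline.
  provider.close(pipeline);
} catch (NotLeaderException e) {
  // This SCM is no longer the leader; the new leader is expected to
  // re-drive the pipeline close.
  LOG.warn("Not closing pipeline {} here: this SCM is not the leader.", pipeline.getId(), e);
}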