Use of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode in the Apache Ozone project.
The following is the register method of the SCMDatanodeProtocolServer class.
/**
 * Handles a datanode registration request. Converts the protobuf datanode
 * details, registers the node with the SCM node manager, and — when the
 * node manager reports success — publishes the container and pipeline
 * reports as events. The outcome of building the response is audit-logged.
 *
 * NOTE(review): failures thrown by the node-manager register call or by
 * event publishing happen before the try block and are therefore not
 * captured by the audit log — confirm whether that is intentional.
 *
 * @param extendedDatanodeDetailsProto datanode identity in protobuf form
 * @param nodeReport                   node report sent with registration
 * @param containerReportsProto        container reports sent with registration
 * @param pipelineReportsProto         pipeline reports sent with registration
 * @param layoutInfo                   layout version of the registering node
 * @return the registration response built from the node manager's command
 * @throws IOException if building the registration response fails
 */
@Override
public SCMRegisteredResponseProto register(
    HddsProtos.ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto,
    NodeReportProto nodeReport, ContainerReportsProto containerReportsProto,
    PipelineReportsProto pipelineReportsProto, LayoutVersionProto layoutInfo)
    throws IOException {
  final DatanodeDetails dnDetails =
      DatanodeDetails.getFromProtoBuf(extendedDatanodeDetailsProto);
  final Map<String, String> auditMap = Maps.newHashMap();
  auditMap.put("datanodeDetails", dnDetails.toString());

  // TODO : Return the list of Nodes that forms the SCM HA.
  final RegisteredCommand cmd = scm.getScmNodeManager()
      .register(dnDetails, nodeReport, pipelineReportsProto, layoutInfo);

  // Only a successfully registered node gets its reports dispatched.
  if (cmd.getError() == SCMRegisteredResponseProto.ErrorCode.success) {
    eventPublisher.fireEvent(CONTAINER_REPORT,
        new SCMDatanodeHeartbeatDispatcher.ContainerReportFromDatanode(
            dnDetails, containerReportsProto));
    eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
        new NodeRegistrationContainerReport(dnDetails, containerReportsProto));
    eventPublisher.fireEvent(PIPELINE_REPORT,
        new PipelineReportFromDatanode(dnDetails, pipelineReportsProto));
  }

  // Audit pattern: log success in finally unless the catch flipped the flag.
  boolean auditSuccess = true;
  try {
    return getRegisteredResponse(cmd);
  } catch (Exception ex) {
    auditSuccess = false;
    AUDIT.logWriteFailure(
        buildAuditMessageForFailure(SCMAction.REGISTER, auditMap, ex));
    throw ex;
  } finally {
    if (auditSuccess) {
      AUDIT.logWriteSuccess(
          buildAuditMessageForSuccess(SCMAction.REGISTER, auditMap));
    }
  }
}
Aggregations