Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto in project ozone by apache.
From the class RegisterEndpointTask, method call().
/**
* Computes a result, or throws an exception if unable to do so.
*
* @return computed result
* @throws Exception if unable to compute a result
*/
@Override
public EndpointStateMachine.EndPointStates call() throws Exception {
  if (getDatanodeDetails() == null) {
    LOG.error("DatanodeDetails cannot be null in RegisterEndpoint task, "
        + "shutting down the endpoint.");
    return rpcEndPoint.setState(
        EndpointStateMachine.EndPointStates.SHUTDOWN);
  }
  rpcEndPoint.lock();
  try {
    if (rpcEndPoint.getState()
        .equals(EndpointStateMachine.EndPointStates.REGISTER)) {
      LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
          .setMetadataLayoutVersion(
              layoutVersionManager.getMetadataLayoutVersion())
          .setSoftwareLayoutVersion(
              layoutVersionManager.getSoftwareLayoutVersion())
          .build();
      ContainerReportsProto containerReport =
          datanodeContainerManager.getController().getContainerReport();
      NodeReportProto nodeReport = datanodeContainerManager.getNodeReport();
      PipelineReportsProto pipelineReportsProto =
          datanodeContainerManager.getPipelineReport();
      // TODO: Add responses to the command queue.
      SCMRegisteredResponseProto response = rpcEndPoint.getEndPoint()
          .register(datanodeDetails.getExtendedProtoBufMessage(), nodeReport,
              containerReport, pipelineReportsProto, layoutInfo);
      Preconditions.checkState(
          UUID.fromString(response.getDatanodeUUID())
              .equals(datanodeDetails.getUuid()),
          "Unexpected datanode ID in the response.");
      Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()),
          "Invalid cluster ID in the response.");
      Preconditions.checkState(response.getErrorCode() == success,
          "DataNode has higher Software Layout Version than SCM.");
      if (response.hasHostname() && response.hasIpAddress()) {
        datanodeDetails.setHostName(response.getHostname());
        datanodeDetails.setIpAddress(response.getIpAddress());
      }
      if (response.hasNetworkName() && response.hasNetworkLocation()) {
        datanodeDetails.setNetworkName(response.getNetworkName());
        datanodeDetails.setNetworkLocation(response.getNetworkLocation());
      }
      EndpointStateMachine.EndPointStates nextState =
          rpcEndPoint.getState().getNextState();
      rpcEndPoint.setState(nextState);
      rpcEndPoint.zeroMissedCount();
      this.stateContext.configureHeartbeatFrequency();
    }
  } catch (IOException ex) {
    rpcEndPoint.logIfNeeded(ex);
  } finally {
    rpcEndPoint.unlock();
  }
  return rpcEndPoint.getState();
}
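The report this task ships to SCM is an ordinary protobuf message, so it can also be assembled by hand. Below is a minimal sketch using only the builders that appear elsewhere on this page; the random UUID standing in for a real pipeline ID is an illustrative assumption.

// Hand-built PipelineReportsProto with a single, non-leader pipeline.
// The UUID string is a stand-in for a real pipeline ID.
String id = UUID.randomUUID().toString();
PipelineReport report = PipelineReport.newBuilder()
    .setPipelineID(PipelineID.newBuilder().setId(id).build())
    .setIsLeader(false)
    .build();
PipelineReportsProto reports = PipelineReportsProto.newBuilder()
    .addPipelineReport(report)
    .build();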
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto in project ozone by apache.
From the class PipelineReportHandler, method onMessage().
@Override
public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
    EventPublisher publisher) {
  Preconditions.checkNotNull(pipelineReportFromDatanode);
  DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
  PipelineReportsProto pipelineReport = pipelineReportFromDatanode.getReport();
  Preconditions.checkNotNull(dn,
      "Pipeline Report is missing DatanodeDetails.");
  if (LOGGER.isTraceEnabled()) {
    LOGGER.trace("Processing pipeline report for dn: {}", dn);
  }
  for (PipelineReport report : pipelineReport.getPipelineReportList()) {
    try {
      processPipelineReport(report, dn, publisher);
    } catch (NotLeaderException ex) {
      // Swallow NotLeaderException: it is expected when a follower
      // processes a pipeline report, so logging it would only add noise.
    } catch (PipelineNotFoundException e) {
      LOGGER.error("Could not find pipeline {}", report.getPipelineID());
    } catch (IOException e) {
      LOGGER.error("Could not process pipeline report={} from dn={}.",
          report, dn, e);
    }
  }
}
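For orientation, a hedged sketch of how a report reaches this handler: in SCM the handler is subscribed to SCMEvents.PIPELINE_REPORT on an EventQueue, so firing a PipelineReportFromDatanode event invokes onMessage. The pipelineReportHandler and dn variables are assumed fixtures; constructing a real PipelineReportHandler needs SCM internals (safemode manager, pipeline manager, configuration) that are elided here.

// pipelineReportHandler and dn are assumed to be built elsewhere.
EventQueue eventQueue = new EventQueue();
eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
eventQueue.fireEvent(SCMEvents.PIPELINE_REPORT,
    new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
        dn, PipelineReportsProto.newBuilder().build()));
// Drain the queue so the handler runs before any assertions.
eventQueue.processAll(1000);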
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto in project ozone by apache.
From the class TestEndpoints, method setUp().
@Before
public void setUp() throws Exception {
  // The following setup runs only once.
  if (!isSetupDone) {
    initializeInjector();
    isSetupDone = true;
  }
  String datanodeId = datanodeDetails.getUuid().toString();
  String datanodeId2 = datanodeDetails2.getUuid().toString();
  containerReportsProto = ContainerReportsProto.newBuilder()
      .addReports(ContainerReplicaProto.newBuilder()
          .setContainerID(containerId)
          .setState(ContainerReplicaProto.State.OPEN)
          .setOriginNodeId(datanodeId).build())
      .build();
  UUID pipelineUuid = UUID.fromString(pipelineId);
  HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
      .setMostSigBits(pipelineUuid.getMostSignificantBits())
      .setLeastSigBits(pipelineUuid.getLeastSignificantBits()).build();
  PipelineReport pipelineReport = PipelineReport.newBuilder()
      .setPipelineID(PipelineID.newBuilder()
          .setId(pipelineId).setUuid128(uuid128).build())
      .setIsLeader(true).build();
  PipelineReportsProto pipelineReportsProto = PipelineReportsProto.newBuilder()
      .addPipelineReport(pipelineReport).build();
  DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder()
      .setHostName(HOST1).setUuid(datanodeId).setIpAddress(IP1).build();
  extendedDatanodeDetailsProto =
      HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
          .setDatanodeDetails(datanodeDetailsProto).setVersion("0.6.0")
          .setSetupTime(1596347628802L).setBuildDate("2020-08-01T08:50Z")
          .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
  StorageReportProto storageReportProto1 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1")
      .setScmUsed(10000).setRemaining(5400).setCapacity(25000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  StorageReportProto storageReportProto2 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2")
      .setScmUsed(25000).setRemaining(10000).setCapacity(50000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  NodeReportProto nodeReportProto = NodeReportProto.newBuilder()
      .addStorageReport(storageReportProto1)
      .addStorageReport(storageReportProto2).build();
  DatanodeDetailsProto datanodeDetailsProto2 = DatanodeDetailsProto.newBuilder()
      .setHostName(HOST2).setUuid(datanodeId2).setIpAddress(IP2).build();
  ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 =
      ExtendedDatanodeDetailsProto.newBuilder()
          .setDatanodeDetails(datanodeDetailsProto2).setVersion("0.6.0")
          .setSetupTime(1596347636802L).setBuildDate("2020-08-01T08:50Z")
          .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
  StorageReportProto storageReportProto3 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1")
      .setScmUsed(20000).setRemaining(7800).setCapacity(50000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  StorageReportProto storageReportProto4 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2")
      .setScmUsed(60000).setRemaining(10000).setCapacity(80000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  NodeReportProto nodeReportProto2 = NodeReportProto.newBuilder()
      .addStorageReport(storageReportProto3)
      .addStorageReport(storageReportProto4).build();
  LayoutVersionProto layoutInfo = defaultLayoutVersionProto();
  try {
    reconScm.getDatanodeProtocolServer().register(
        extendedDatanodeDetailsProto, nodeReportProto, containerReportsProto,
        pipelineReportsProto, layoutInfo);
    reconScm.getDatanodeProtocolServer().register(
        extendedDatanodeDetailsProto2, nodeReportProto2,
        ContainerReportsProto.newBuilder().build(),
        PipelineReportsProto.newBuilder().build(),
        defaultLayoutVersionProto());
    // Process all events in the event queue.
    reconScm.getEventQueue().processAll(1000);
  } catch (Exception ex) {
    Assert.fail(ex.getMessage());
  }
  // Write data to OM. A sample volume (sampleVol) and a bucket (bucketOne)
  // are already created in AbstractOMMetadataManagerTest. Create a new
  // volume and bucket, then write keys to the bucket.
  String volumeKey = reconOMMetadataManager.getVolumeKey("sampleVol2");
  OmVolumeArgs args = OmVolumeArgs.newBuilder().setVolume("sampleVol2")
      .setAdminName("TestUser").setOwnerName("TestUser").build();
  reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
  OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
      .setVolumeName("sampleVol2").setBucketName("bucketOne").build();
  String bucketKey = reconOMMetadataManager.getBucketKey(
      bucketInfo.getVolumeName(), bucketInfo.getBucketName());
  reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
  // Write three keys: key_one, key_two, key_three.
  writeDataToOm(reconOMMetadataManager, "key_one");
  writeDataToOm(reconOMMetadataManager, "key_two");
  writeDataToOm(reconOMMetadataManager, "key_three");
  // Truncate the global stats table before each test runs.
  dslContext.truncate(GLOBAL_STATS);
}
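The defaultLayoutVersionProto() call above comes from the test utilities. A hand-rolled equivalent, assuming both versions sit at the initial layout version 0 (an assumption for illustration), uses the same builder seen in the RegisterEndpointTask example:

// Assumed stand-in for defaultLayoutVersionProto(); 0 marks the
// initial layout version here.
LayoutVersionProto layoutInfo = LayoutVersionProto.newBuilder()
    .setMetadataLayoutVersion(0)
    .setSoftwareLayoutVersion(0)
    .build();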
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto in project ozone by apache.
From the class StorageContainerDatanodeProtocolServerSideTranslatorPB, method register().
public SCMRegisteredResponseProto register(SCMRegisterRequestProto request)
    throws IOException {
  ContainerReportsProto containerRequestProto = request.getContainerReport();
  NodeReportProto dnNodeReport = request.getNodeReport();
  PipelineReportsProto pipelineReport = request.getPipelineReports();
  LayoutVersionProto layoutInfo;
  if (request.hasDataNodeLayoutVersion()) {
    layoutInfo = request.getDataNodeLayoutVersion();
  } else {
    // Backward compatibility: make sure old datanodes can still talk
    // to SCM.
    layoutInfo = toLayoutVersionProto(INITIAL_VERSION.layoutVersion(),
        INITIAL_VERSION.layoutVersion());
  }
  return impl.register(request.getExtendedDatanodeDetails(), dnNodeReport,
      containerRequestProto, pipelineReport, layoutInfo);
}
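For the datanode side of this exchange, a sketch of assembling the request this method unpacks. Protobuf generates a setter for every getter used above; the field values (extendedDetails, nodeReport, containerReport, pipelineReports, layoutInfo) are assumed to be built elsewhere.

// Mirror image of the getters in register(): build the request that a
// datanode would send. All field values are assumed fixtures.
SCMRegisterRequestProto request = SCMRegisterRequestProto.newBuilder()
    .setExtendedDatanodeDetails(extendedDetails)
    .setNodeReport(nodeReport)
    .setContainerReport(containerReport)
    .setPipelineReports(pipelineReports)
    .setDataNodeLayoutVersion(layoutInfo)
    .build();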
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto in project ozone by apache.
From the class TestOneReplicaPipelineSafeModeRule, method firePipelineEvent().
private void firePipelineEvent(List<Pipeline> pipelines) {
  Map<DatanodeDetails, PipelineReportsProto.Builder> reportMap =
      new HashMap<>();
  for (Pipeline pipeline : pipelines) {
    for (DatanodeDetails dn : pipeline.getNodes()) {
      reportMap.putIfAbsent(dn, PipelineReportsProto.newBuilder());
    }
  }
  for (DatanodeDetails dn : reportMap.keySet()) {
    List<PipelineReport> reports = new ArrayList<>();
    for (PipelineID pipeline :
        mockNodeManager.getNode2PipelineMap().getPipelines(dn.getUuid())) {
      try {
        if (!pipelines.contains(pipelineManager.getPipeline(pipeline))) {
          continue;
        }
      } catch (PipelineNotFoundException pnfe) {
        continue;
      }
      HddsProtos.PipelineID pipelineID = pipeline.getProtobuf();
      reports.add(PipelineReport.newBuilder()
          .setPipelineID(pipelineID)
          .setIsLeader(true)
          .setBytesWritten(0)
          .build());
    }
    PipelineReportsProto.Builder pipelineReportsProto =
        PipelineReportsProto.newBuilder();
    pipelineReportsProto.addAllPipelineReport(reports);
    eventQueue.fireEvent(SCMEvents.PIPELINE_REPORT,
        new SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode(
            dn, pipelineReportsProto.build()));
  }
}
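A hedged usage sketch for the helper above: fire reports for only a subset of the created pipelines, then wait for the safemode state to change. The pipelineManager, scmSafeModeManager, and waitFor timeouts are assumptions standing in for the test's real fixtures; waitFor throws if the condition does not hold within the timeout.

// Hypothetical driver: report half the pipelines, then wait until SCM
// leaves safemode. Fixture names are assumptions for illustration.
List<Pipeline> pipelines = pipelineManager.getPipelines();
firePipelineEvent(pipelines.subList(0, pipelines.size() / 2));
GenericTestUtils.waitFor(() -> !scmSafeModeManager.getInSafeMode(),
    100, 10000);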