Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport in project Ozone by Apache.
The class XceiverServerRatis, method getPipelineReport.
@Override
public List<PipelineReport> getPipelineReport() {
  try {
    Iterable<RaftGroupId> gids = server.getGroupIds();
    List<PipelineReport> reports = new ArrayList<>();
    // Build one report for every Raft group this datanode participates in.
    for (RaftGroupId groupId : gids) {
      HddsProtos.PipelineID pipelineID =
          PipelineID.valueOf(groupId.getUuid()).getProtobuf();
      reports.add(PipelineReport.newBuilder()
          .setPipelineID(pipelineID)
          .setIsLeader(groupLeaderMap.getOrDefault(groupId, Boolean.FALSE))
          .setBytesWritten(calculatePipelineBytesWritten(pipelineID))
          .build());
    }
    return reports;
  } catch (Exception e) {
    // On failure, no report is produced for this round.
    return null;
  }
}
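The list returned above is wrapped into a PipelineReportsProto before being sent to SCM with the datanode heartbeat. A minimal sketch of that wrapping, assuming the standard addAll builder method that protobuf generates for the repeated pipelineReport field; the helper class and method names are illustrative, not part of the project:
import java.util.List;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;

final class PipelineReportUtil {
  private PipelineReportUtil() { }

  // Wrap the per-group reports into the aggregate message sent to SCM.
  // addAllPipelineReport is the builder method protobuf generates for the
  // repeated pipelineReport field; adjust if the generated name differs.
  static PipelineReportsProto wrap(List<PipelineReport> reports) {
    return PipelineReportsProto.newBuilder()
        .addAllPipelineReport(reports)
        .build();
  }
}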
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport in project Ozone by Apache.
The class PipelineReportHandler, method onMessage.
@Override
public void onMessage(PipelineReportFromDatanode pipelineReportFromDatanode,
    EventPublisher publisher) {
  Preconditions.checkNotNull(pipelineReportFromDatanode);
  DatanodeDetails dn = pipelineReportFromDatanode.getDatanodeDetails();
  PipelineReportsProto pipelineReport = pipelineReportFromDatanode.getReport();
  Preconditions.checkNotNull(dn, "Pipeline Report is missing DatanodeDetails.");
  if (LOGGER.isTraceEnabled()) {
    LOGGER.trace("Processing pipeline report for dn: {}", dn);
  }
  for (PipelineReport report : pipelineReport.getPipelineReportList()) {
    try {
      processPipelineReport(report, dn, publisher);
    } catch (NotLeaderException ex) {
      // Avoid logging NotLeaderException, which is expected when processing
      // pipeline reports on follower SCMs.
    } catch (PipelineNotFoundException e) {
      LOGGER.error("Could not find pipeline {}", report.getPipelineID());
    } catch (IOException e) {
      LOGGER.error("Could not process pipeline report={} from dn={}.",
          report, dn, e);
    }
  }
}
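For reference, converting the proto IDs carried by a report back into PipelineID objects, as the per-report processing does, can be sketched as a standalone helper; it uses only calls already shown in the snippets on this page, and the class and method names are illustrative:
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;

final class ReportedPipelines {
  private ReportedPipelines() { }

  // Extract the PipelineIDs carried by a datanode's pipeline report.
  static List<PipelineID> toPipelineIds(PipelineReportsProto reports) {
    List<PipelineID> ids = new ArrayList<>();
    for (PipelineReport report : reports.getPipelineReportList()) {
      ids.add(PipelineID.getFromProtobuf(report.getPipelineID()));
    }
    return ids;
  }
}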
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport in project Ozone by Apache.
The class TestPipelineClose, method testPipelineCloseWithPipelineAction.
@Test
public void testPipelineCloseWithPipelineAction() throws Exception {
  List<DatanodeDetails> dns = ratisContainer.getPipeline().getNodes();
  PipelineActionsFromDatanode pipelineActionsFromDatanode =
      HddsTestUtils.getPipelineActionFromDatanode(dns.get(0),
          ratisContainer.getPipeline().getId());
  // Send a close action for the pipeline.
  PipelineActionHandler pipelineActionHandler =
      new PipelineActionHandler(pipelineManager, SCMContext.emptyContext(), conf);
  pipelineActionHandler.onMessage(pipelineActionsFromDatanode, new EventQueue());
  Thread.sleep(5000);
  OzoneContainer ozoneContainer =
      cluster.getHddsDatanodes().get(0).getDatanodeStateMachine().getContainer();
  List<PipelineReport> pipelineReports =
      ozoneContainer.getPipelineReport().getPipelineReportList();
  for (PipelineReport pipelineReport : pipelineReports) {
    // Ensure the closed pipeline is no longer reported by the datanode.
    Assert.assertNotEquals(
        PipelineID.getFromProtobuf(pipelineReport.getPipelineID()),
        ratisContainer.getPipeline().getId());
  }
  try {
    pipelineManager.getPipeline(ratisContainer.getPipeline().getId());
    Assert.fail("Pipeline should not exist in SCM");
  } catch (PipelineNotFoundException e) {
    // Expected: the pipeline has been removed from SCM.
  }
}
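The try/fail/catch block at the end can also be written more compactly with Assert.assertThrows; a sketch, assuming JUnit 4.13 or later is on the test classpath:
// Equivalent to the try/fail/catch above, assuming JUnit 4.13+.
Assert.assertThrows(PipelineNotFoundException.class,
    () -> pipelineManager.getPipeline(ratisContainer.getPipeline().getId()));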
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport in project Ozone by Apache.
The class TestReconPipelineReportHandler, method testProcessPipelineReport.
@Test
public void testProcessPipelineReport() throws IOException {
  // Check with a pipeline that does not yet exist in Recon.
  Pipeline pipeline = getRandomPipeline();
  PipelineID pipelineID = pipeline.getId();
  HddsProtos.PipelineID pipelineIDProto = pipelineID.getProtobuf();
  ReconPipelineManager reconPipelineManagerMock = mock(ReconPipelineManager.class);
  when(reconPipelineManagerMock.getPipeline(pipelineID)).thenReturn(pipeline);
  StorageContainerServiceProvider scmServiceProviderMock =
      mock(StorageContainerServiceProvider.class);
  when(scmServiceProviderMock.getPipeline(pipelineIDProto)).thenReturn(pipeline);
  OzoneConfiguration configuration = new OzoneConfiguration();
  ReconPipelineReportHandler handler = new ReconPipelineReportHandler(
      new ReconSafeModeManager(), reconPipelineManagerMock,
      SCMContext.emptyContext(), configuration, scmServiceProviderMock);
  EventPublisher eventPublisherMock = mock(EventPublisher.class);
  PipelineReport report = mock(PipelineReport.class);
  when(report.getPipelineID()).thenReturn(pipelineIDProto);
  handler.processPipelineReport(report, pipeline.getNodes().get(0),
      eventPublisherMock);
  // Verify that the new pipeline was added to the pipeline manager.
  verify(reconPipelineManagerMock, times(1)).addPipeline(pipeline);
  verify(reconPipelineManagerMock, times(1)).getPipeline(pipelineID);
  // Check with a pipeline that already exists in Recon.
  pipeline = getRandomPipeline();
  pipelineID = pipeline.getId();
  pipelineIDProto = pipelineID.getProtobuf();
  when(reconPipelineManagerMock.containsPipeline(pipelineID)).thenReturn(true);
  when(reconPipelineManagerMock.getPipeline(pipelineID)).thenReturn(pipeline);
  when(report.getPipelineID()).thenReturn(pipelineIDProto);
  handler.processPipelineReport(report, pipeline.getNodes().get(0),
      eventPublisherMock);
  // Verify that the existing pipeline was not added again.
  verify(reconPipelineManagerMock, times(0)).addPipeline(pipeline);
  verify(reconPipelineManagerMock, times(1)).getPipeline(pipelineID);
}
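Since PipelineReport is a plain protobuf message, the Mockito mock above could also be replaced by a real instance built with the same builder used elsewhere on this page; a sketch, assuming the isLeader value is irrelevant to the handler under test (a second instance would be needed for the second pipeline, since protobuf messages are immutable):
// A real report carrying the same pipeline ID, instead of mock(PipelineReport.class).
PipelineReport report = PipelineReport.newBuilder()
    .setPipelineID(pipelineIDProto)
    .setIsLeader(false)
    .build();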
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport in project Ozone by Apache.
The class TestEndpoints, method setUp.
@Before
public void setUp() throws Exception {
  // The following setup runs only once.
  if (!isSetupDone) {
    initializeInjector();
    isSetupDone = true;
  }
  String datanodeId = datanodeDetails.getUuid().toString();
  String datanodeId2 = datanodeDetails2.getUuid().toString();
  containerReportsProto = ContainerReportsProto.newBuilder()
      .addReports(ContainerReplicaProto.newBuilder().setContainerID(containerId)
          .setState(ContainerReplicaProto.State.OPEN).setOriginNodeId(datanodeId).build())
      .build();
  UUID pipelineUuid = UUID.fromString(pipelineId);
  HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
      .setMostSigBits(pipelineUuid.getMostSignificantBits())
      .setLeastSigBits(pipelineUuid.getLeastSignificantBits()).build();
  PipelineReport pipelineReport = PipelineReport.newBuilder()
      .setPipelineID(PipelineID.newBuilder().setId(pipelineId).setUuid128(uuid128).build())
      .setIsLeader(true).build();
  PipelineReportsProto pipelineReportsProto = PipelineReportsProto.newBuilder()
      .addPipelineReport(pipelineReport).build();
  DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder()
      .setHostName(HOST1).setUuid(datanodeId).setIpAddress(IP1).build();
  extendedDatanodeDetailsProto = HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
      .setDatanodeDetails(datanodeDetailsProto).setVersion("0.6.0")
      .setSetupTime(1596347628802L).setBuildDate("2020-08-01T08:50Z")
      .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
  StorageReportProto storageReportProto1 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1")
      .setScmUsed(10000).setRemaining(5400).setCapacity(25000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  StorageReportProto storageReportProto2 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2")
      .setScmUsed(25000).setRemaining(10000).setCapacity(50000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  NodeReportProto nodeReportProto = NodeReportProto.newBuilder()
      .addStorageReport(storageReportProto1)
      .addStorageReport(storageReportProto2).build();
  DatanodeDetailsProto datanodeDetailsProto2 = DatanodeDetailsProto.newBuilder()
      .setHostName(HOST2).setUuid(datanodeId2).setIpAddress(IP2).build();
  ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 =
      ExtendedDatanodeDetailsProto.newBuilder()
          .setDatanodeDetails(datanodeDetailsProto2).setVersion("0.6.0")
          .setSetupTime(1596347636802L).setBuildDate("2020-08-01T08:50Z")
          .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
  StorageReportProto storageReportProto3 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1")
      .setScmUsed(20000).setRemaining(7800).setCapacity(50000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  StorageReportProto storageReportProto4 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2")
      .setScmUsed(60000).setRemaining(10000).setCapacity(80000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  NodeReportProto nodeReportProto2 = NodeReportProto.newBuilder()
      .addStorageReport(storageReportProto3)
      .addStorageReport(storageReportProto4).build();
  LayoutVersionProto layoutInfo = defaultLayoutVersionProto();
  try {
    reconScm.getDatanodeProtocolServer().register(extendedDatanodeDetailsProto,
        nodeReportProto, containerReportsProto, pipelineReportsProto, layoutInfo);
    reconScm.getDatanodeProtocolServer().register(extendedDatanodeDetailsProto2,
        nodeReportProto2, ContainerReportsProto.newBuilder().build(),
        PipelineReportsProto.newBuilder().build(), defaultLayoutVersionProto());
    // Process all events in the event queue.
    reconScm.getEventQueue().processAll(1000);
  } catch (Exception ex) {
    Assert.fail(ex.getMessage());
  }
  // Write data to OM.
  // A sample volume (sampleVol) and a bucket (bucketOne) are already created
  // in AbstractOMMetadataManagerTest.
  // Create a new volume and bucket, then write keys to the bucket.
  String volumeKey = reconOMMetadataManager.getVolumeKey("sampleVol2");
  OmVolumeArgs args = OmVolumeArgs.newBuilder()
      .setVolume("sampleVol2").setAdminName("TestUser").setOwnerName("TestUser").build();
  reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
  OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
      .setVolumeName("sampleVol2").setBucketName("bucketOne").build();
  String bucketKey = reconOMMetadataManager.getBucketKey(
      bucketInfo.getVolumeName(), bucketInfo.getBucketName());
  reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
  // key = key_one
  writeDataToOm(reconOMMetadataManager, "key_one");
  // key = key_two
  writeDataToOm(reconOMMetadataManager, "key_two");
  // key = key_three
  writeDataToOm(reconOMMetadataManager, "key_three");
  // Truncate the global stats table before running each test.
  dslContext.truncate(GLOBAL_STATS);
}
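The pipeline ID construction in the setup above (string id plus 128-bit uuid) can be isolated into a small helper; a sketch that uses only the builder calls already shown, with an illustrative class and method name:
import java.util.UUID;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;

final class ProtoIds {
  private ProtoIds() { }

  // Build the HddsProtos.PipelineID proto from a pipeline UUID string,
  // filling both the string id and the 128-bit uuid fields.
  static HddsProtos.PipelineID toProtoPipelineId(String pipelineId) {
    UUID uuid = UUID.fromString(pipelineId);
    HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
        .setMostSigBits(uuid.getMostSignificantBits())
        .setLeastSigBits(uuid.getLeastSignificantBits())
        .build();
    return HddsProtos.PipelineID.newBuilder()
        .setId(pipelineId)
        .setUuid128(uuid128)
        .build();
  }
}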