Example usage of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.
From class TestSCMDatanodeHeartbeatDispatcher, method testNodeReportDispatcher:
/**
 * Verifies that a heartbeat carrying a node report is dispatched as exactly
 * one NODE_REPORT event whose payload wraps the original report.
 */
@Test
public void testNodeReportDispatcher() throws IOException {
  AtomicInteger firedEvents = new AtomicInteger();
  NodeReportProto nodeReport = NodeReportProto.getDefaultInstance();

  // The datanode must appear registered or the dispatcher drops the heartbeat.
  NodeManager nodeManager = Mockito.mock(NodeManager.class);
  Mockito.when(nodeManager.isNodeRegistered(Mockito.any())).thenReturn(true);

  // Publisher that asserts on every fired event instead of queueing it.
  EventPublisher verifyingPublisher = new EventPublisher() {
    @Override
    public <PAYLOAD, EVENT extends Event<PAYLOAD>> void fireEvent(
        EVENT event, PAYLOAD payload) {
      Assert.assertEquals(event, NODE_REPORT);
      firedEvents.incrementAndGet();
      Assert.assertEquals(nodeReport,
          ((NodeReportFromDatanode) payload).getReport());
    }
  };
  SCMDatanodeHeartbeatDispatcher dispatcher =
      new SCMDatanodeHeartbeatDispatcher(nodeManager, verifyingPublisher);

  DatanodeDetails datanodeDetails = randomDatanodeDetails();
  SCMHeartbeatRequestProto heartbeat = SCMHeartbeatRequestProto.newBuilder()
      .setDatanodeDetails(datanodeDetails.getProtoBufMessage())
      .setNodeReport(nodeReport)
      .build();

  dispatcher.dispatch(heartbeat);

  // One node report in the heartbeat => exactly one published event.
  Assert.assertEquals(1, firedEvents.get());
}
Example usage of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.
From class TestPipelineDatanodesIntersection, method testPipelineDatanodesIntersection:
/**
 * Repeatedly creates RATIS/THREE pipelines until the provider is exhausted
 * (signalled by SCMException) and counts how many created pipelines share
 * their complete datanode set with a previously created pipeline.
 *
 * Fix: the overlap branch previously fired when the overlap list was EMPTY,
 * which counted non-overlapping pipelines as intersections and made the
 * logging loop dead code (it iterated an empty list). The condition is
 * negated so the branch runs only when real overlaps exist.
 */
@Test
public void testPipelineDatanodesIntersection() throws IOException {
  NodeManager nodeManager = new MockNodeManager(true, nodeCount);
  conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
  conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
  SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
  PipelineStateManager stateManager = PipelineStateManagerImpl.newBuilder()
      .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore))
      .setRatisServer(scmhaManager.getRatisServer())
      .setNodeManager(nodeManager)
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
  PipelineProvider provider =
      new MockRatisPipelineProvider(nodeManager, stateManager, conf);

  int healthyNodeCount = nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
  int intersectionCount = 0;
  int createdPipelineCount = 0;
  while (!end && createdPipelineCount <= healthyNodeCount * nodeHeaviness) {
    try {
      Pipeline pipeline = provider.create(
          RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
      HddsProtos.Pipeline pipelineProto =
          pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION);
      stateManager.addPipeline(pipelineProto);
      nodeManager.addPipeline(pipeline);
      List<Pipeline> overlapPipelines = RatisPipelineUtils
          .checkPipelineContainSameDatanodes(stateManager, pipeline);
      // BUGFIX: was overlapPipelines.isEmpty() — count/log actual overlaps.
      if (!overlapPipelines.isEmpty()) {
        intersectionCount++;
        for (Pipeline overlapPipeline : overlapPipelines) {
          LOG.info("This pipeline: " + pipeline.getId().toString()
              + " overlaps with previous pipeline: " + overlapPipeline.getId()
              + ". They share same set of datanodes as: "
              + pipeline.getNodesInOrder().get(0).getUuid() + "/"
              + pipeline.getNodesInOrder().get(1).getUuid() + "/"
              + pipeline.getNodesInOrder().get(2).getUuid() + " and "
              + overlapPipeline.getNodesInOrder().get(0).getUuid() + "/"
              + overlapPipeline.getNodesInOrder().get(1).getUuid() + "/"
              + overlapPipeline.getNodesInOrder().get(2).getUuid()
              + " is the same.");
        }
      }
      createdPipelineCount++;
    } catch (SCMException e) {
      // Expected once no further distinct pipelines can be formed.
      end = true;
    } catch (IOException e) {
      end = true;
      // Should not throw regular IOException.
      Assert.fail();
    }
  }
  end = false;
  LOG.info("Among total "
      + stateManager.getPipelines(
          RatisReplicationConfig.getInstance(ReplicationFactor.THREE)).size()
      + " created pipelines" + " with " + healthyNodeCount
      + " healthy datanodes and " + nodeHeaviness + " as node heaviness, "
      + intersectionCount + " pipelines has same set of datanodes.");
}
Example usage of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.
From class TestReconPipelineManager, method testInitialize:
/**
 * Initializes Recon's pipeline manager from a snapshot of SCM pipelines and
 * verifies that new pipelines are adopted, an already-known pipeline's state
 * is refreshed to OPEN, and a pipeline unknown to SCM is removed.
 */
@Test
public void testInitialize() throws IOException {
  // Three OPEN pipelines as reported by SCM.
  List<Pipeline> scmPipelines = getPipelines(3);

  // A pipeline Recon already knows about, still ALLOCATED locally.
  Pipeline knownPipeline = Pipeline.newBuilder()
      .setReplicationConfig(
          StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE))
      .setId(scmPipelines.get(0).getId())
      .setNodes(scmPipelines.get(0).getNodes())
      .setState(Pipeline.PipelineState.ALLOCATED)
      .build();

  // An obsolete pipeline that SCM no longer reports.
  Pipeline obsoletePipeline = Pipeline.newBuilder()
      .setReplicationConfig(
          StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE))
      .setId(PipelineID.randomId())
      .setNodes(Collections.singletonList(randomDatanodeDetails()))
      .setState(Pipeline.PipelineState.CLOSED)
      .build();

  NetworkTopology topology = new NetworkTopologyImpl(conf);
  EventQueue eventQueue = new EventQueue();
  this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
  Mockito.when(versionManager.getMetadataLayoutVersion())
      .thenReturn(maxLayoutVersion());
  Mockito.when(versionManager.getSoftwareLayoutVersion())
      .thenReturn(maxLayoutVersion());
  NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig,
      eventQueue, topology, SCMContext.emptyContext(), versionManager);

  try (ReconPipelineManager reconPipelineManager =
      ReconPipelineManager.newReconPipelineManager(conf, nodeManager,
          ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue,
          scmhaManager, scmContext)) {
    // Swap in a leader/safe-mode context after construction, as before.
    scmContext = new SCMContext.Builder()
        .setIsInSafeMode(true)
        .setLeader(true)
        .setIsPreCheckComplete(true)
        .setSCM(mock(StorageContainerManager.class))
        .build();
    reconPipelineManager.setScmContext(scmContext);

    reconPipelineManager.addPipeline(knownPipeline);
    reconPipelineManager.addPipeline(obsoletePipeline);
    reconPipelineManager.initializePipelines(scmPipelines);

    List<Pipeline> reconPipelines = reconPipelineManager.getPipelines();
    // Only the three pipelines SCM reported should remain.
    assertEquals(3, reconPipelines.size());
    // Every SCM pipeline was picked up.
    for (Pipeline pipeline : scmPipelines) {
      assertTrue(reconPipelineManager.containsPipeline(pipeline.getId()));
    }
    // The known pipeline's state was refreshed from ALLOCATED to OPEN.
    assertEquals(Pipeline.PipelineState.OPEN,
        reconPipelineManager.getPipeline(knownPipeline.getId())
            .getPipelineState());
    // The obsolete pipeline was dropped.
    assertFalse(reconPipelineManager.containsPipeline(obsoletePipeline.getId()));
  }
}
Example usage of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.
From class TestReconPipelineManager, method testStubbedReconPipelineFactory:
/**
 * Verifies that a freshly created Recon pipeline manager is wired with a
 * ReconPipelineFactory and that every registered pipeline provider is a
 * ReconPipelineProvider.
 *
 * Fix: the original asserted that the provider map IS empty, which made the
 * type-checking loop below unreachable dead code and contradicted its intent.
 * The factory is expected to register providers, so the assertion is
 * inverted to assertFalse (matching the loop that inspects each provider).
 */
@Test
public void testStubbedReconPipelineFactory() throws IOException {
  NodeManager nodeManagerMock = mock(NodeManager.class);
  ReconPipelineManager reconPipelineManager =
      ReconPipelineManager.newReconPipelineManager(conf, nodeManagerMock,
          ReconSCMDBDefinition.PIPELINES.getTable(store), new EventQueue(),
          scmhaManager, scmContext);
  PipelineFactory pipelineFactory = reconPipelineManager.getPipelineFactory();
  assertTrue(pipelineFactory instanceof ReconPipelineFactory);
  ReconPipelineFactory reconPipelineFactory =
      (ReconPipelineFactory) pipelineFactory;
  // Providers must exist, otherwise the loop below would verify nothing.
  assertFalse(reconPipelineFactory.getProviders().isEmpty());
  for (ReplicationType type : reconPipelineFactory.getProviders().keySet()) {
    PipelineProvider pipelineProvider =
        reconPipelineFactory.getProviders().get(type);
    assertTrue(pipelineProvider instanceof ReconPipelineProvider);
  }
}
Example usage of org.apache.hadoop.hdds.scm.node.NodeManager in the Apache Ozone project.
From class TestSCMContainerPlacementCapacity, method chooseDatanodes:
/**
 * Statistical check of capacity-aware placement: with two nodes excluded by
 * parameter and one node nearly full, 1000 placements must never pick any of
 * those three, and must favour the empty node over the mostly-full ones.
 */
@Test
public void chooseDatanodes() throws SCMException {
  // given
  OzoneConfiguration conf = new OzoneConfiguration();
  // Tiny free-space floor so 100-byte "disks" are usable in this test.
  conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 1,
      StorageUnit.BYTES);

  // Seven healthy datanodes, each starting with an empty 100-byte volume.
  List<DatanodeInfo> nodes = new ArrayList<>();
  for (int i = 0; i < 7; i++) {
    DatanodeInfo node = new DatanodeInfo(
        MockDatanodeDetails.randomDatanodeDetails(),
        NodeStatus.inServiceHealthy(),
        UpgradeUtils.defaultLayoutVersionProto());
    StorageReportProto dataReport = HddsTestUtils.createStorageReport(
        node.getUuid(), "/data1-" + node.getUuidString(), 100L, 0, 100L, null);
    MetadataStorageReportProto metaReport =
        HddsTestUtils.createMetadataStorageReport(
            "/metadata1-" + node.getUuidString(), 100L, 0, 100L, null);
    node.updateStorageReports(new ArrayList<>(Arrays.asList(dataReport)));
    node.updateMetaDataStorageReports(
        new ArrayList<>(Arrays.asList(metaReport)));
    nodes.add(node);
  }

  // Fill nodes 2, 3 and 4 to 90, 80 and 70 bytes used respectively.
  long[][] usage = {{2, 90L}, {3, 80L}, {4, 70L}};
  for (long[] spec : usage) {
    DatanodeInfo node = nodes.get((int) spec[0]);
    StorageReportProto report = HddsTestUtils.createStorageReport(
        node.getUuid(), "/data1-" + node.getUuidString(),
        100L, spec[1], 100L - spec[1], null);
    node.updateStorageReports(new ArrayList<>(Arrays.asList(report)));
  }

  NodeManager nodeManager = Mockito.mock(NodeManager.class);
  when(nodeManager.getNodes(NodeStatus.inServiceHealthy()))
      .thenReturn(new ArrayList<>(nodes));
  // Default stat first; the per-node stubs below override it for 2, 3, 4.
  when(nodeManager.getNodeStat(anyObject()))
      .thenReturn(new SCMNodeMetric(100L, 0L, 100L));
  for (long[] spec : usage) {
    when(nodeManager.getNodeStat(nodes.get((int) spec[0])))
        .thenReturn(new SCMNodeMetric(100L, spec[1], 100L - spec[1]));
  }

  SCMContainerPlacementCapacity placementPolicy =
      new SCMContainerPlacementCapacity(nodeManager, conf, null, true, null);

  List<DatanodeDetails> usedNodes = new ArrayList<>();
  usedNodes.add(nodes.get(0));
  usedNodes.add(nodes.get(1));

  Map<DatanodeDetails, Integer> picks = new HashMap<>();
  for (DatanodeDetails node : nodes) {
    picks.put(node, 0);
  }

  for (int round = 0; round < 1000; round++) {
    // when
    List<DatanodeDetails> chosen =
        placementPolicy.chooseDatanodes(usedNodes, null, 1, 15, 15);
    // then
    Assert.assertEquals(1, chosen.size());
    DatanodeDetails pick = chosen.get(0);
    Assert.assertNotEquals(
        "Datanode 0 should not been selected: excluded by parameter",
        nodes.get(0), pick);
    Assert.assertNotEquals(
        "Datanode 1 should not been selected: excluded by parameter",
        nodes.get(1), pick);
    Assert.assertNotEquals(
        "Datanode 2 should not been selected: not enough space there",
        nodes.get(2), pick);
    picks.put(pick, picks.get(pick) + 1);
  }

  // datanode 6 has more space than datanode 3 and datanode 4.
  Assert.assertTrue(picks.get(nodes.get(3)) < picks.get(nodes.get(6)));
  Assert.assertTrue(picks.get(nodes.get(4)) < picks.get(nodes.get(6)));
}
End of aggregated NodeManager usage examples.