Example usage of org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails in the Apache Ozone project, taken from the class TestSCMNodeManager, method testGetNodeInfo.
@Test
public void testGetNodeInfo() throws IOException, InterruptedException, NodeNotFoundException, AuthenticationException {
  OzoneConfiguration conf = getConf();
  final int totalNodes = 6;
  SCMNodeManager nodeManager = createNodeManager(conf);
  // Every datanode reports a single volume with identical usage figures.
  final long diskCapacity = 2000;
  final long diskUsed = 100;
  final long diskRemaining = 1900;
  for (int idx = 0; idx < totalNodes; idx++) {
    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
    UUID uuid = dn.getUuid();
    String storagePath = testDir.getAbsolutePath() + "/" + uuid;
    StorageReportProto storageReport = HddsTestUtils.createStorageReport(uuid, storagePath, diskCapacity, diskUsed, diskRemaining, null);
    // First registration without layout info, then again with layout info,
    // followed by a heartbeat — mirrors the datanode startup handshake.
    nodeManager.register(dn, HddsTestUtils.createNodeReport(Arrays.asList(storageReport), Collections.emptyList()), HddsTestUtils.getRandomPipelineReports());
    LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
    LayoutVersionProto layoutInfo = toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion());
    nodeManager.register(dn, HddsTestUtils.createNodeReport(Arrays.asList(storageReport), Collections.emptyList()), HddsTestUtils.getRandomPipelineReports(), layoutInfo);
    nodeManager.processHeartbeat(dn, layoutInfo);
    // Nodes 0-2 stay IN_SERVICE; 3 and 4 are decommissioned; 5 enters maintenance.
    switch (idx) {
      case 3:
      case 4:
        nodeManager.setNodeOperationalState(dn, HddsProtos.NodeOperationalState.DECOMMISSIONED);
        break;
      case 5:
        nodeManager.setNodeOperationalState(dn, HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE);
        break;
      default:
        break;
    }
  }
  Thread.sleep(100);
  Map<String, Long> stats = nodeManager.getNodeInfo();
  // 3 IN_SERVICE nodes:
  assertEquals(6000, stats.get("DiskCapacity").longValue());
  assertEquals(300, stats.get("DiskUsed").longValue());
  assertEquals(5700, stats.get("DiskRemaining").longValue());
  // 2 Decommissioned nodes
  assertEquals(4000, stats.get("DecommissionedDiskCapacity").longValue());
  assertEquals(200, stats.get("DecommissionedDiskUsed").longValue());
  assertEquals(3800, stats.get("DecommissionedDiskRemaining").longValue());
  // 1 Maintenance node
  assertEquals(2000, stats.get("MaintenanceDiskCapacity").longValue());
  assertEquals(100, stats.get("MaintenanceDiskUsed").longValue());
  assertEquals(1900, stats.get("MaintenanceDiskRemaining").longValue());
}
Example usage of org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails in the Apache Ozone project, taken from the class TestSCMNodeManager, method registerWithCapacity.
/**
 * Create {@link DatanodeDetails} to register with {@code nodeManager}, and
 * provide the datanode maximum capacity so that space used does not block
 * pipeline creation. Also check that the result of registering matched
 * {@code expectedResult}.
 *
 * @param nodeManager the node manager to register the datanode with.
 * @param layout the layout version to present during registration.
 * @param expectedResult the error code registration is expected to return.
 * @return The created {@link DatanodeDetails}.
 */
private DatanodeDetails registerWithCapacity(SCMNodeManager nodeManager, LayoutVersionProto layout, ErrorCode expectedResult) {
  DatanodeDetails details = MockDatanodeDetails.randomDatanodeDetails();
  // Both reports are keyed to this datanode's UUID / network path, so the
  // same instance must be the one that gets registered below.
  StorageReportProto storageReport = HddsTestUtils.createStorageReport(details.getUuid(), details.getNetworkFullPath(), Long.MAX_VALUE);
  MetadataStorageReportProto metadataStorageReport = HddsTestUtils.createMetadataStorageReport(details.getNetworkFullPath(), Long.MAX_VALUE);
  // Fix: register `details` (whose reports were built above), not a second
  // random datanode — otherwise the registered node has no capacity report
  // and the returned datanode does not match the one described in the docs.
  RegisteredCommand cmd = nodeManager.register(details, HddsTestUtils.createNodeReport(Arrays.asList(storageReport), Arrays.asList(metadataStorageReport)), getRandomPipelineReports(), layout);
  Assert.assertEquals(expectedResult, cmd.getError());
  return cmd.getDatanode();
}
Example usage of org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails in the Apache Ozone project, taken from the class TestSCMNodeManager, method tesVolumeInfoFromNodeReport.
/**
 * Verify that a node reporting multiple volumes — half of them failed —
 * registers as healthy and that the minimum healthy-volume count reflects
 * only the non-failed volumes.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 */
@Test
public void tesVolumeInfoFromNodeReport() throws IOException, InterruptedException, AuthenticationException {
  OzoneConfiguration conf = getConf();
  conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, MILLISECONDS);
  final int volumeCount = 10;
  final long capacity = 2000;
  final long used = 100;
  List<DatanodeDetails> dnList = new ArrayList<>(1);
  try (SCMNodeManager nodeManager = createNodeManager(conf)) {
    EventQueue eventQueue = (EventQueue) scm.getEventQueue();
    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
    dnList.add(dn);
    UUID dnId = dn.getUuid();
    long free = capacity - used;
    List<StorageReportProto> reports = new ArrayList<>(volumeCount);
    // Volumes at even indices are reported as failed, odd ones as healthy,
    // leaving exactly volumeCount / 2 usable volumes.
    for (int i = 0; i < volumeCount; i++) {
      boolean volumeFailed = (i % 2 == 0);
      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
      reports.add(HddsTestUtils.createStorageReport(dnId, storagePath, capacity, used, free, null, volumeFailed));
    }
    nodeManager.register(dn, HddsTestUtils.createNodeReport(reports, Collections.emptyList()), null);
    LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
    LayoutVersionProto layoutInfo = toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion());
    nodeManager.processHeartbeat(dn, layoutInfo);
    // TODO: wait for EventQueue to be processed
    eventQueue.processAll(8000L);
    assertEquals(1, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
    assertEquals(volumeCount / 2, nodeManager.minHealthyVolumeNum(dnList));
    dnList.clear();
  }
}
Example usage of org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails in the Apache Ozone project, taken from the class TestSCMNodeManager, method testScmStatsFromNodeReport.
/**
 * Test multiple nodes sending initial heartbeat with their node report,
 * and verify aggregate capacity / used / remaining statistics.
 *
 * @throws IOException
 * @throws InterruptedException
 * @throws TimeoutException
 */
@Test
public void testScmStatsFromNodeReport() throws IOException, InterruptedException, AuthenticationException {
  OzoneConfiguration conf = getConf();
  conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 1000, MILLISECONDS);
  final int nodeCount = 10;
  final long capacity = 2000;
  final long used = 100;
  final long remaining = capacity - used;
  List<DatanodeDetails> dnList = new ArrayList<>(nodeCount);
  try (SCMNodeManager nodeManager = createNodeManager(conf)) {
    LayoutVersionManager versionManager = nodeManager.getLayoutVersionManager();
    LayoutVersionProto layoutInfo = toLayoutVersionProto(versionManager.getMetadataLayoutVersion(), versionManager.getSoftwareLayoutVersion());
    EventQueue eventQueue = (EventQueue) scm.getEventQueue();
    // Register nodeCount datanodes, each with one identically-sized volume.
    for (int i = 0; i < nodeCount; i++) {
      DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
      dnList.add(dn);
      UUID dnId = dn.getUuid();
      String storagePath = testDir.getAbsolutePath() + "/" + dnId;
      StorageReportProto report = HddsTestUtils.createStorageReport(dnId, storagePath, capacity, used, remaining, null);
      nodeManager.register(dn, HddsTestUtils.createNodeReport(Arrays.asList(report), Collections.emptyList()), null);
      nodeManager.processHeartbeat(dn, layoutInfo);
    }
    // TODO: wait for EventQueue to be processed
    eventQueue.processAll(8000L);
    assertEquals(nodeCount, nodeManager.getNodeCount(NodeStatus.inServiceHealthy()));
    // Cluster-wide totals are the per-node figures scaled by node count.
    assertEquals(capacity * nodeCount, (long) nodeManager.getStats().getCapacity().get());
    assertEquals(used * nodeCount, (long) nodeManager.getStats().getScmUsed().get());
    assertEquals(remaining * nodeCount, (long) nodeManager.getStats().getRemaining().get());
    assertEquals(1, nodeManager.minHealthyVolumeNum(dnList));
    dnList.clear();
  }
}
Aggregations