Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in project ozone by apache.
From the class TestSCMNodeMetrics, method testNodeReportProcessingFailure.
/**
 * Verifies node report processing failure count.
 */
@Test
public void testNodeReportProcessingFailure() {
  long nrProcessed = getCounter("NumNodeReportProcessingFailed");
  DatanodeDetails randomDatanode = MockDatanodeDetails.randomDatanodeDetails();
  StorageReportProto storageReport = HddsTestUtils.createStorageReport(
      randomDatanode.getUuid(), "/tmp", 100, 10, 90, null);
  NodeReportProto nodeReport = NodeReportProto.newBuilder()
      .addStorageReport(storageReport).build();
  // The datanode was never registered with the node manager, so processing
  // the report fails and the failure counter increments.
  nodeManager.processNodeReport(randomDatanode, nodeReport);
  assertEquals("NumNodeReportProcessingFailed", nrProcessed + 1,
      getCounter("NumNodeReportProcessingFailed"));
}
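For context, HddsTestUtils.createStorageReport is a convenience wrapper around the StorageReportProto builder used directly in TestEndpoints.setUp below. A minimal sketch of an equivalent report built by hand, assuming the helper maps its arguments to the setters in this order (the mapping is an assumption, not confirmed by this snippet):

// Hedged sketch: the same report built directly with the protobuf builder.
// How createStorageReport maps its arguments to these setters is assumed.
StorageReportProto storageReport = StorageReportProto.newBuilder()
    .setStorageUuid(randomDatanode.getUuid().toString())
    .setStorageLocation("/tmp")
    .setCapacity(100)
    .setScmUsed(10)
    .setRemaining(90)
    .build();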
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in project ozone by apache.
From the class TestSCMNodeMetrics, method testNodeReportProcessing.
/**
 * Verifies node report processing count.
 *
 * @throws InterruptedException
 */
@Test
public void testNodeReportProcessing() throws InterruptedException {
  long nrProcessed = getCounter("NumNodeReportProcessed");
  StorageReportProto storageReport = HddsTestUtils.createStorageReport(
      registeredDatanode.getUuid(), "/tmp", 100, 10, 90, null);
  NodeReportProto nodeReport = NodeReportProto.newBuilder()
      .addStorageReport(storageReport).build();
  // Unlike the previous test, this datanode is registered, so the report
  // is processed successfully and the success counter increments.
  nodeManager.processNodeReport(registeredDatanode, nodeReport);
  Assert.assertEquals("NumNodeReportProcessed", nrProcessed + 1,
      getCounter("NumNodeReportProcessed"));
}
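Both tests wrap a single storage report in a NodeReportProto, but a node report can carry one StorageReportProto per volume, as TestEndpoints.setUp below demonstrates. A sketch, where reportDisk1 and reportDisk2 are hypothetical reports built as above:

// Sketch: a node report aggregates one StorageReportProto per volume.
// reportDisk1 and reportDisk2 are hypothetical, built like storageReport above.
NodeReportProto multiVolumeReport = NodeReportProto.newBuilder()
    .addStorageReport(reportDisk1)
    .addStorageReport(reportDisk2)
    .build();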
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in project ozone by apache.
From the class SCMNodeManager, method getNodeStatInternal.
private SCMNodeStat getNodeStatInternal(DatanodeDetails datanodeDetails) {
  try {
    long capacity = 0L;
    long used = 0L;
    long remaining = 0L;
    final DatanodeInfo datanodeInfo = nodeStateManager.getNode(datanodeDetails);
    final List<StorageReportProto> storageReportProtos =
        datanodeInfo.getStorageReports();
    // Sum capacity, used and remaining space across all storage volumes
    // reported by this datanode.
    for (StorageReportProto reportProto : storageReportProtos) {
      capacity += reportProto.getCapacity();
      used += reportProto.getScmUsed();
      remaining += reportProto.getRemaining();
    }
    return new SCMNodeStat(capacity, used, remaining);
  } catch (NodeNotFoundException e) {
    LOG.warn("Cannot generate NodeStat, datanode {} not found.",
        datanodeDetails.getUuid());
    return null;
  }
}
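The loop reduces the per-volume reports to node-wide totals. A purely illustrative stream-based equivalent, assuming a List<StorageReportProto> named reports (the accessors are the ones used above):

// Illustrative alternative using streams; behavior matches the loop above.
long capacity = reports.stream().mapToLong(StorageReportProto::getCapacity).sum();
long used = reports.stream().mapToLong(StorageReportProto::getScmUsed).sum();
long remaining = reports.stream().mapToLong(StorageReportProto::getRemaining).sum();
SCMNodeStat stat = new SCMNodeStat(capacity, used, remaining);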
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in project ozone by apache.
From the class SCMCommonPlacementPolicy, method hasEnoughSpace.
/**
 * Returns true if this node has enough space to meet our requirement.
 *
 * @param datanodeDetails DatanodeDetails
 * @param metadataSizeRequired space required for metadata
 * @param dataSizeRequired space required for data
 * @return true if we have enough space.
 */
public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails,
    long metadataSizeRequired, long dataSizeRequired) {
  Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
  boolean enoughForData = false;
  boolean enoughForMeta = false;
  DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
  if (dataSizeRequired > 0) {
    // A single data volume must have more than dataSizeRequired remaining;
    // free space is not summed across volumes.
    for (StorageReportProto reportProto : datanodeInfo.getStorageReports()) {
      if (reportProto.getRemaining() > dataSizeRequired) {
        enoughForData = true;
        break;
      }
    }
  } else {
    enoughForData = true;
  }
  if (!enoughForData) {
    return false;
  }
  if (metadataSizeRequired > 0) {
    // Same rule for metadata volumes, checked separately.
    for (MetadataStorageReportProto reportProto
        : datanodeInfo.getMetadataStorageReports()) {
      if (reportProto.getRemaining() > metadataSizeRequired) {
        enoughForMeta = true;
        break;
      }
    }
  } else {
    enoughForMeta = true;
  }
  return enoughForData && enoughForMeta;
}
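Note the design choice: the node qualifies only if some single volume can hold the whole requirement, so a node with plenty of aggregate free space spread thinly across disks is still rejected. A hypothetical caller (candidateNode, assumed to be a DatanodeInfo, and chosenNodes are illustrative names, and the sizes are illustrative, not Ozone defaults):

// Hypothetical usage: filter placement candidates by free space.
long dataSizeRequired = 1024L * 1024 * 1024;   // e.g. one container replica
long metadataSizeRequired = 8L * 1024 * 1024;  // e.g. its metadata (assumed)
if (SCMCommonPlacementPolicy.hasEnoughSpace(
    candidateNode, metadataSizeRequired, dataSizeRequired)) {
  chosenNodes.add(candidateNode);
}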
Use of org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto in project ozone by apache.
From the class TestEndpoints, method setUp.
@Before
public void setUp() throws Exception {
  // The following setup runs only once
  if (!isSetupDone) {
    initializeInjector();
    isSetupDone = true;
  }
  String datanodeId = datanodeDetails.getUuid().toString();
  String datanodeId2 = datanodeDetails2.getUuid().toString();
  containerReportsProto = ContainerReportsProto.newBuilder()
      .addReports(ContainerReplicaProto.newBuilder()
          .setContainerID(containerId)
          .setState(ContainerReplicaProto.State.OPEN)
          .setOriginNodeId(datanodeId).build())
      .build();
  UUID pipelineUuid = UUID.fromString(pipelineId);
  HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
      .setMostSigBits(pipelineUuid.getMostSignificantBits())
      .setLeastSigBits(pipelineUuid.getLeastSignificantBits()).build();
  PipelineReport pipelineReport = PipelineReport.newBuilder()
      .setPipelineID(PipelineID.newBuilder()
          .setId(pipelineId).setUuid128(uuid128).build())
      .setIsLeader(true).build();
  PipelineReportsProto pipelineReportsProto = PipelineReportsProto.newBuilder()
      .addPipelineReport(pipelineReport).build();
  DatanodeDetailsProto datanodeDetailsProto = DatanodeDetailsProto.newBuilder()
      .setHostName(HOST1).setUuid(datanodeId).setIpAddress(IP1).build();
  extendedDatanodeDetailsProto = HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
      .setDatanodeDetails(datanodeDetailsProto)
      .setVersion("0.6.0").setSetupTime(1596347628802L)
      .setBuildDate("2020-08-01T08:50Z")
      .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
  StorageReportProto storageReportProto1 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1")
      .setScmUsed(10000).setRemaining(5400).setCapacity(25000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  StorageReportProto storageReportProto2 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2")
      .setScmUsed(25000).setRemaining(10000).setCapacity(50000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  NodeReportProto nodeReportProto = NodeReportProto.newBuilder()
      .addStorageReport(storageReportProto1)
      .addStorageReport(storageReportProto2).build();
  DatanodeDetailsProto datanodeDetailsProto2 = DatanodeDetailsProto.newBuilder()
      .setHostName(HOST2).setUuid(datanodeId2).setIpAddress(IP2).build();
  ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto2 =
      ExtendedDatanodeDetailsProto.newBuilder()
          .setDatanodeDetails(datanodeDetailsProto2)
          .setVersion("0.6.0").setSetupTime(1596347636802L)
          .setBuildDate("2020-08-01T08:50Z")
          .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36").build();
  StorageReportProto storageReportProto3 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk1")
      .setScmUsed(20000).setRemaining(7800).setCapacity(50000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  StorageReportProto storageReportProto4 = StorageReportProto.newBuilder()
      .setStorageType(StorageTypeProto.DISK).setStorageLocation("/disk2")
      .setScmUsed(60000).setRemaining(10000).setCapacity(80000)
      .setStorageUuid(UUID.randomUUID().toString()).setFailed(false).build();
  NodeReportProto nodeReportProto2 = NodeReportProto.newBuilder()
      .addStorageReport(storageReportProto3)
      .addStorageReport(storageReportProto4).build();
  LayoutVersionProto layoutInfo = defaultLayoutVersionProto();
  try {
    reconScm.getDatanodeProtocolServer().register(extendedDatanodeDetailsProto,
        nodeReportProto, containerReportsProto, pipelineReportsProto, layoutInfo);
    reconScm.getDatanodeProtocolServer().register(extendedDatanodeDetailsProto2,
        nodeReportProto2, ContainerReportsProto.newBuilder().build(),
        PipelineReportsProto.newBuilder().build(), defaultLayoutVersionProto());
    // Process all events in the event queue
    reconScm.getEventQueue().processAll(1000);
  } catch (Exception ex) {
    Assert.fail(ex.getMessage());
  }
  // Write data to OM. A sample volume (sampleVol) and a bucket (bucketOne)
  // are already created in AbstractOMMetadataManagerTest.
  // Create a new volume and bucket and then write keys to the bucket.
  String volumeKey = reconOMMetadataManager.getVolumeKey("sampleVol2");
  OmVolumeArgs args = OmVolumeArgs.newBuilder()
      .setVolume("sampleVol2").setAdminName("TestUser")
      .setOwnerName("TestUser").build();
  reconOMMetadataManager.getVolumeTable().put(volumeKey, args);
  OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
      .setVolumeName("sampleVol2").setBucketName("bucketOne").build();
  String bucketKey = reconOMMetadataManager.getBucketKey(
      bucketInfo.getVolumeName(), bucketInfo.getBucketName());
  reconOMMetadataManager.getBucketTable().put(bucketKey, bucketInfo);
  writeDataToOm(reconOMMetadataManager, "key_one");
  writeDataToOm(reconOMMetadataManager, "key_two");
  writeDataToOm(reconOMMetadataManager, "key_three");
  // Truncate global stats table before running each test
  dslContext.truncate(GLOBAL_STATS);
}
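Tying this setup back to the aggregation logic in getNodeStatInternal above, the per-node totals the SCM would derive from these reports work out as follows (all figures copied from the builders in setUp):

// Worked totals, as getNodeStatInternal would aggregate them:
//   datanode1: capacity  = 25000 + 50000 = 75000
//              scmUsed   = 10000 + 25000 = 35000
//              remaining =  5400 + 10000 = 15400
//   datanode2: capacity  = 50000 + 80000 = 130000
//              scmUsed   = 20000 + 60000 = 80000
//              remaining =  7800 + 10000 = 17800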