Usage example of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project: class NodeStateMap, method addNode.
/**
 * Registers a new datanode in NodeStateMap, together with an empty
 * container set for it.
 *
 * @param datanodeDetails details of the datanode to register
 * @param nodeStatus initial {@link NodeStatus} of the node
 * @param layoutInfo initial layout version reported by the node
 *
 * @throws NodeAlreadyExistsException if a node with the same UUID already exists
 */
public void addNode(DatanodeDetails datanodeDetails, NodeStatus nodeStatus, LayoutVersionProto layoutInfo) throws NodeAlreadyExistsException {
  // Resolve the key before entering the critical section; getUuid() is a
  // plain accessor and needs no lock.
  final UUID uuid = datanodeDetails.getUuid();
  lock.writeLock().lock();
  try {
    if (nodeMap.containsKey(uuid)) {
      throw new NodeAlreadyExistsException("Node UUID: " + uuid);
    }
    nodeMap.put(uuid, new DatanodeInfo(datanodeDetails, nodeStatus, layoutInfo));
    // Start with an empty container set for the new node.
    nodeToContainer.put(uuid, new HashSet<>());
  } finally {
    lock.writeLock().unlock();
  }
}
Usage example of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project: class NodeStateMap, method updateNodeHealthState.
/**
 * Updates the node health state, preserving the node's current
 * operational state.
 *
 * @param nodeId Node Id
 * @param newHealth new health state
 * @return the updated NodeStatus of the node
 *
 * @throws NodeNotFoundException if the node is not present
 */
public NodeStatus updateNodeHealthState(UUID nodeId, NodeState newHealth) throws NodeNotFoundException {
  // Acquire the lock BEFORE the try block: if lock() were inside the try
  // and failed, the finally block would call unlock() on a lock that was
  // never acquired, throwing IllegalMonitorStateException and masking the
  // original failure.
  lock.writeLock().lock();
  try {
    DatanodeInfo dn = getNodeInfo(nodeId);
    NodeStatus oldStatus = dn.getNodeStatus();
    // Keep the operational state; replace only the health component.
    NodeStatus newStatus = new NodeStatus(oldStatus.getOperationalState(), newHealth);
    dn.setNodeStatus(newStatus);
    return newStatus;
  } finally {
    lock.writeLock().unlock();
  }
}
Usage example of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project: class SCMCommonPlacementPolicy, method hasEnoughSpace.
/**
 * Checks whether this node can satisfy both the data and the metadata
 * space requirements on at least one of its respective volumes.
 *
 * @param datanodeDetails DatanodeDetails; must be a {@link DatanodeInfo} instance
 * @param metadataSizeRequired metadata bytes needed (a non-positive value means no requirement)
 * @param dataSizeRequired data bytes needed (a non-positive value means no requirement)
 * @return true if we have enough space.
 */
public static boolean hasEnoughSpace(DatanodeDetails datanodeDetails, long metadataSizeRequired, long dataSizeRequired) {
  Preconditions.checkArgument(datanodeDetails instanceof DatanodeInfo);
  final DatanodeInfo datanodeInfo = (DatanodeInfo) datanodeDetails;
  // Data requirement: some data volume must have strictly more room.
  if (dataSizeRequired > 0) {
    boolean dataFits = false;
    for (StorageReportProto report : datanodeInfo.getStorageReports()) {
      if (report.getRemaining() > dataSizeRequired) {
        dataFits = true;
        break;
      }
    }
    if (!dataFits) {
      return false;
    }
  }
  // Metadata requirement: some metadata volume must have strictly more room.
  if (metadataSizeRequired > 0) {
    for (MetadataStorageReportProto report : datanodeInfo.getMetadataStorageReports()) {
      if (report.getRemaining() > metadataSizeRequired) {
        return true;
      }
    }
    return false;
  }
  return true;
}
Usage example of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project: class NodeEndpoint, method getDatanodes.
/**
 * Return the list of datanodes with detailed information about each datanode.
 * <p>
 * For every node known to the node manager this aggregates: its storage
 * report, health and operational state, pipeline membership (including
 * which pipelines it leads), open/total container counts, and
 * version/build metadata, then wraps everything in a DatanodesResponse.
 * @return {@link Response}
 */
@GET
public Response getDatanodes() {
List<DatanodeMetadata> datanodes = new ArrayList<>();
List<DatanodeDetails> datanodeDetails = nodeManager.getAllNodes();
datanodeDetails.forEach(datanode -> {
DatanodeStorageReport storageReport = getStorageReport(datanode);
NodeState nodeState = null;
try {
nodeState = nodeManager.getNodeStatus(datanode).getHealth();
} catch (NodeNotFoundException e) {
// Health stays null for this node; the response is still built.
LOG.warn("Cannot get nodeState for datanode {}", datanode, e);
}
final NodeOperationalState nodeOpState = datanode.getPersistedOpState();
String hostname = datanode.getHostName();
Set<PipelineID> pipelineIDs = nodeManager.getPipelines(datanode);
List<DatanodePipeline> pipelines = new ArrayList<>();
// Accumulators mutated inside the per-pipeline lambda below; AtomicInteger
// is used because locals captured by a lambda must be effectively final.
AtomicInteger leaderCount = new AtomicInteger();
AtomicInteger openContainers = new AtomicInteger();
DatanodeMetadata.Builder builder = DatanodeMetadata.newBuilder();
pipelineIDs.forEach(pipelineID -> {
try {
Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
String leaderNode = pipeline.getLeaderNode().getHostName();
DatanodePipeline datanodePipeline = new DatanodePipeline(pipelineID.getId(), pipeline.getReplicationConfig().getReplicationType().toString(), ReplicationConfig.getLegacyFactor(pipeline.getReplicationConfig()).getNumber(), leaderNode);
pipelines.add(datanodePipeline);
// Count pipelines this datanode currently leads.
if (datanode.getUuid().equals(pipeline.getLeaderId())) {
leaderCount.getAndIncrement();
}
int openContainerPerPipeline = reconContainerManager.getPipelineToOpenContainer().getOrDefault(pipelineID, 0);
openContainers.getAndAdd(openContainerPerPipeline);
} catch (PipelineNotFoundException ex) {
// A pipeline may have been closed/removed concurrently; skip it.
LOG.warn("Cannot get pipeline {} for datanode {}, pipeline not found", pipelineID.getId(), hostname, ex);
} catch (IOException ioEx) {
LOG.warn("Cannot get leader node of pipeline with id {}.", pipelineID.getId(), ioEx);
}
});
try {
Set<ContainerID> allContainers = nodeManager.getContainers(datanode);
builder.withContainers(allContainers.size());
builder.withOpenContainers(openContainers.get());
} catch (NodeNotFoundException ex) {
// Container counts are simply omitted for an unknown node.
LOG.warn("Cannot get containers, datanode {} not found.", datanode.getUuid(), ex);
}
// NOTE(review): unchecked cast assumes the node manager always hands out
// DatanodeInfo instances here — confirm against the NodeManager impl.
DatanodeInfo dnInfo = (DatanodeInfo) datanode;
datanodes.add(builder.withHostname(nodeManager.getHostName(datanode)).withDatanodeStorageReport(storageReport).withLastHeartbeat(nodeManager.getLastHeartbeat(datanode)).withState(nodeState).withOperationalState(nodeOpState).withPipelines(pipelines).withLeaderCount(leaderCount.get()).withUUid(datanode.getUuidString()).withVersion(nodeManager.getVersion(datanode)).withSetupTime(nodeManager.getSetupTime(datanode)).withRevision(nodeManager.getRevision(datanode)).withBuildDate(nodeManager.getBuildDate(datanode)).withLayoutVersion(dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion()).build());
});
DatanodesResponse datanodesResponse = new DatanodesResponse(datanodes.size(), datanodes);
return Response.ok(datanodesResponse).build();
}
Usage example of org.apache.hadoop.hdds.scm.node.DatanodeInfo in the Apache Ozone project: class TestContainerPlacementFactory, method testRackAwarePolicy.
/**
 * Verifies that the placement policy factory produces a working
 * SCMContainerPlacementRackAware policy: on a 3-rack / 15-node topology
 * it must choose 3 nodes where the first two share a rack and the third
 * is on a different one.
 */
@Test
public void testRackAwarePolicy() throws IOException {
  conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY, SCMContainerPlacementRackAware.class.getName());
  conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);
  NodeSchema[] schemas = new NodeSchema[] { ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA };
  NodeSchemaManager.getInstance().init(schemas, true);
  cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());
  // Build the topology: 3 racks ("/rack0".."/rack2"), 5 datanodes each.
  final String rackPrefix = "/rack";
  final String hostPrefix = "node";
  for (int i = 0; i < 15; i++) {
    DatanodeDetails details = MockDatanodeDetails.createDatanodeDetails(hostPrefix + i, rackPrefix + (i / 5));
    DatanodeInfo info = new DatanodeInfo(details, NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());
    StorageReportProto dataReport = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
    MetadataStorageReportProto metaReport = HddsTestUtils.createMetadataStorageReport("/metadata1-" + info.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
    info.updateStorageReports(new ArrayList<>(Arrays.asList(dataReport)));
    info.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaReport)));
    datanodes.add(details);
    cluster.add(details);
    dnInfos.add(info);
  }
  // Give nodes 2..4 progressively different usage: (used, remaining) of
  // (90, 10), (80, 20) and (70, 30) respectively.
  for (int i = 2; i <= 4; i++) {
    DatanodeInfo info = dnInfos.get(i);
    StorageReportProto report = HddsTestUtils.createStorageReport(info.getUuid(), "/data1-" + info.getUuidString(), STORAGE_CAPACITY, (11 - i) * 10L, (i - 1) * 10L, null);
    info.updateStorageReports(new ArrayList<>(Arrays.asList(report)));
  }
  // Mock a node manager backed by the topology built above.
  nodeManager = Mockito.mock(NodeManager.class);
  when(nodeManager.getNodes(NodeStatus.inServiceHealthy())).thenReturn(new ArrayList<>(datanodes));
  for (DatanodeInfo dn : dnInfos) {
    when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
  }
  PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager, cluster, true, SCMContainerPlacementMetrics.create());
  final int nodeNum = 3;
  List<DatanodeDetails> chosen = policy.chooseDatanodes(null, null, nodeNum, 15, 15);
  Assert.assertEquals(nodeNum, chosen.size());
  // Rack-aware invariant: first two replicas share a rack, third does not.
  Assert.assertTrue(cluster.isSameParent(chosen.get(0), chosen.get(1)));
  Assert.assertFalse(cluster.isSameParent(chosen.get(0), chosen.get(2)));
  Assert.assertFalse(cluster.isSameParent(chosen.get(1), chosen.get(2)));
}
Aggregations