Use of org.apache.hadoop.hdds.scm.PlacementPolicy in project ozone by apache.
The class TestContainerHealthTaskRecordGenerator, method setup.
@Before
public void setup() {
  placementPolicy = mock(PlacementPolicy.class);
  container = mock(ContainerInfo.class);
  when(container.getReplicationConfig())
      .thenReturn(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
  when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
  when(container.getContainerID()).thenReturn((long) 123456);
  when(placementPolicy.validateContainerPlacement(Mockito.anyList(), Mockito.anyInt()))
      .thenReturn(new ContainerPlacementStatusDefault(1, 1, 1));
}
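This stub makes the placement policy a non-factor for the record-generator tests: any replica list, for any replication factor, is reported as correctly placed. A minimal, hypothetical sketch of exercising such a stub on its own, assuming JUnit 4 and Mockito as in the test above; the class name is made up and the Ozone import paths are best-effort assumptions:

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.Collections;

import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
import org.junit.Test;
import org.mockito.Mockito;

public class PlacementPolicyStubSketch {

  @Test
  public void stubReportsSatisfiedPlacement() {
    PlacementPolicy policy = mock(PlacementPolicy.class);
    when(policy.validateContainerPlacement(Mockito.anyList(), Mockito.anyInt()))
        .thenReturn(new ContainerPlacementStatusDefault(1, 1, 1));

    // The matcher-based stub answers for any arguments, even an empty replica list.
    ContainerPlacementStatus status =
        policy.validateContainerPlacement(Collections.emptyList(), 3);

    // With (1, 1, 1) the default status is expected to report a satisfied policy.
    assertTrue(status.isPolicySatisfied());
    assertEquals(0, status.misReplicationCount());
  }
}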
Use of org.apache.hadoop.hdds.scm.PlacementPolicy in project ozone by apache.
The class TestContainerPlacementFactory, method testRackAwarePolicy.
@Test
public void testRackAwarePolicy() throws IOException {
  conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      SCMContainerPlacementRackAware.class.getName());
  conf.setStorageSize(OZONE_DATANODE_RATIS_VOLUME_FREE_SPACE_MIN, 0, StorageUnit.MB);

  NodeSchema[] schemas = new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
  NodeSchemaManager.getInstance().init(schemas, true);
  cluster = new NetworkTopologyImpl(NodeSchemaManager.getInstance());

  // Build the datanodes and the network topology: 3 racks with 5 datanodes each.
  String rack = "/rack";
  String hostname = "node";
  for (int i = 0; i < 15; i++) {
    DatanodeDetails datanodeDetails =
        MockDatanodeDetails.createDatanodeDetails(hostname + i, rack + (i / 5));
    DatanodeInfo datanodeInfo = new DatanodeInfo(datanodeDetails,
        NodeStatus.inServiceHealthy(), UpgradeUtils.defaultLayoutVersionProto());

    StorageReportProto storage1 = HddsTestUtils.createStorageReport(
        datanodeInfo.getUuid(), "/data1-" + datanodeInfo.getUuidString(),
        STORAGE_CAPACITY, 0, 100L, null);
    MetadataStorageReportProto metaStorage1 = HddsTestUtils.createMetadataStorageReport(
        "/metadata1-" + datanodeInfo.getUuidString(), STORAGE_CAPACITY, 0, 100L, null);
    datanodeInfo.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
    datanodeInfo.updateMetaDataStorageReports(new ArrayList<>(Arrays.asList(metaStorage1)));

    datanodes.add(datanodeDetails);
    cluster.add(datanodeDetails);
    dnInfos.add(datanodeInfo);
  }

  StorageReportProto storage2 = HddsTestUtils.createStorageReport(
      dnInfos.get(2).getUuid(), "/data1-" + dnInfos.get(2).getUuidString(),
      STORAGE_CAPACITY, 90L, 10L, null);
  dnInfos.get(2).updateStorageReports(new ArrayList<>(Arrays.asList(storage2)));
  StorageReportProto storage3 = HddsTestUtils.createStorageReport(
      dnInfos.get(3).getUuid(), "/data1-" + dnInfos.get(3).getUuidString(),
      STORAGE_CAPACITY, 80L, 20L, null);
  dnInfos.get(3).updateStorageReports(new ArrayList<>(Arrays.asList(storage3)));
  StorageReportProto storage4 = HddsTestUtils.createStorageReport(
      dnInfos.get(4).getUuid(), "/data1-" + dnInfos.get(4).getUuidString(),
      STORAGE_CAPACITY, 70L, 30L, null);
  dnInfos.get(4).updateStorageReports(new ArrayList<>(Arrays.asList(storage4)));

  // Create a mock node manager.
  nodeManager = Mockito.mock(NodeManager.class);
  when(nodeManager.getNodes(NodeStatus.inServiceHealthy()))
      .thenReturn(new ArrayList<>(datanodes));
  for (DatanodeInfo dn : dnInfos) {
    when(nodeManager.getNodeByUuid(dn.getUuidString())).thenReturn(dn);
  }

  PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(
      conf, nodeManager, cluster, true, SCMContainerPlacementMetrics.create());

  int nodeNum = 3;
  List<DatanodeDetails> datanodeDetails =
      policy.chooseDatanodes(null, null, nodeNum, 15, 15);
  Assert.assertEquals(nodeNum, datanodeDetails.size());
  Assert.assertTrue(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(1)));
  Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(0), datanodeDetails.get(2)));
  Assert.assertFalse(cluster.isSameParent(datanodeDetails.get(1), datanodeDetails.get(2)));
}
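The assertions capture the rack-aware contract: of the three chosen nodes, exactly two share a rack. The same policy can also be asked to exclude nodes that already hold replicas, which is how ReplicationManager invokes it (see the last snippet below). A hedged sketch that could be appended to the same test method, reusing policy and datanodes from above; the excluded set and the final assertion are illustrative only, and success assumes enough healthy nodes remain on the other racks:

// Hypothetical follow-up: treat the first three datanodes (all on /rack0) as
// existing replica holders and ask the rack-aware policy for one more node.
List<DatanodeDetails> excluded = new ArrayList<>(datanodes.subList(0, 3));
List<DatanodeDetails> replacement = policy.chooseDatanodes(excluded, null, 1, 15, 15);

Assert.assertEquals(1, replacement.size());
// The chosen node must not be one of the excluded replica holders.
Assert.assertFalse(excluded.contains(replacement.get(0)));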
Use of org.apache.hadoop.hdds.scm.PlacementPolicy in project ozone by apache.
The class TestContainerPlacementFactory, method testDefaultPolicy.
@Test
public void testDefaultPolicy() throws IOException {
  PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(
      conf, null, null, true, null);
  Assert.assertSame(SCMContainerPlacementRandom.class, policy.getClass());
}
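The random default only applies when ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY is left unset. A hedged counterpart sketch, reusing the conf, nodeManager and cluster built in testRackAwarePolicy above; it assumes the factory instantiates whatever class that key names:

// Hypothetical counterpart: with an implementation configured explicitly,
// the factory should return that class instead of the random default.
conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
    SCMContainerPlacementRackAware.class.getName());
PlacementPolicy configured = ContainerPlacementPolicyFactory.getPolicy(
    conf, nodeManager, cluster, true, SCMContainerPlacementMetrics.create());
Assert.assertSame(SCMContainerPlacementRackAware.class, configured.getClass());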
Use of org.apache.hadoop.hdds.scm.PlacementPolicy in project ozone by apache.
The class TestContainerHealthStatus, method setup.
@Before
public void setup() {
  placementPolicy = mock(PlacementPolicy.class);
  container = mock(ContainerInfo.class);
  when(container.getReplicationConfig())
      .thenReturn(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
  when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
  when(container.getContainerID()).thenReturn((long) 123456);
  when(placementPolicy.validateContainerPlacement(Mockito.anyList(), Mockito.anyInt()))
      .thenReturn(new ContainerPlacementStatusDefault(1, 1, 1));
}
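This is the same happy-path stub as in TestContainerHealthTaskRecordGenerator: placement is always reported as satisfied, so the health-status checks under test are not affected by rack placement. When a test instead needs to drive the mis-replicated path, the stub can be overridden inside that test. A hedged sketch; the mocked ContainerPlacementStatus and its stubbed values are illustrative and not taken from the Ozone tests:

// Hypothetical override for a mis-replication scenario: report an unsatisfied
// placement that is short by one rack.
ContainerPlacementStatus misReplicated = mock(ContainerPlacementStatus.class);
when(misReplicated.isPolicySatisfied()).thenReturn(false);
when(misReplicated.misReplicationCount()).thenReturn(1);
when(placementPolicy.validateContainerPlacement(Mockito.anyList(), Mockito.anyInt()))
    .thenReturn(misReplicated);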
Use of org.apache.hadoop.hdds.scm.PlacementPolicy in project ozone by apache.
The class ReplicationManager, method handleUnderReplicatedContainer.
/**
 * If the given container is under replicated, identify a new set of
 * datanode(s) to replicate the container using PlacementPolicy
 * and send replicate container command to the identified datanode(s).
 *
 * @param container ContainerInfo
 * @param replicaSet An instance of ContainerReplicaCount, containing the
 *                   current replica count and inflight adds and deletes
 */
private void handleUnderReplicatedContainer(final ContainerInfo container,
    final ContainerReplicaCount replicaSet,
    final ContainerPlacementStatus placementStatus) {
  LOG.debug("Handling under-replicated container: {}", container);
  Set<ContainerReplica> replicas = replicaSet.getReplica();
  try {
    if (replicaSet.isSufficientlyReplicated()
        && placementStatus.isPolicySatisfied()) {
      LOG.info("The container {} with replicas {} is sufficiently "
          + "replicated and is not mis-replicated",
          container.getContainerID(), replicaSet);
      return;
    }
    int repDelta = replicaSet.additionalReplicaNeeded();
    final ContainerID id = container.containerID();
    final List<DatanodeDetails> deletionInFlight = inflightDeletion
        .getOrDefault(id, Collections.emptyList()).stream()
        .map(action -> action.datanode)
        .collect(Collectors.toList());
    final List<DatanodeDetails> replicationInFlight = inflightReplication
        .getOrDefault(id, Collections.emptyList()).stream()
        .map(action -> action.datanode)
        .collect(Collectors.toList());
    final List<DatanodeDetails> source = replicas.stream()
        .filter(r -> r.getState() == State.QUASI_CLOSED
            || r.getState() == State.CLOSED)
        .filter(r -> getNodeStatus(r.getDatanodeDetails()).isHealthy())
        .filter(r -> !deletionInFlight.contains(r.getDatanodeDetails()))
        .sorted((r1, r2) -> r2.getSequenceId().compareTo(r1.getSequenceId()))
        .map(ContainerReplica::getDatanodeDetails)
        .collect(Collectors.toList());
    if (source.size() > 0) {
      final int replicationFactor =
          container.getReplicationConfig().getRequiredNodes();
      // Check whether the container is mis-replicated after taking inflight
      // adds and deletes into account: start from source (healthy replicas
      // minus pending deletes), then add any pending additions.
      List<DatanodeDetails> targetReplicas = new ArrayList<>(source);
      targetReplicas.addAll(replicationInFlight);
      final ContainerPlacementStatus inFlightplacementStatus =
          containerPlacement.validateContainerPlacement(
              targetReplicas, replicationFactor);
      final int misRepDelta = inFlightplacementStatus.misReplicationCount();
      final int replicasNeeded =
          repDelta < misRepDelta ? misRepDelta : repDelta;
      if (replicasNeeded <= 0) {
        LOG.debug("Container {} meets replication requirement with "
            + "inflight replicas", id);
        return;
      }
      // Ensure the target datanode has enough space for a complete container,
      // but since the configured container size may have been reduced below
      // the original, be defensive and take the larger of the two values.
      final long dataSizeRequired =
          Math.max(container.getUsedBytes(), currentContainerSize);
      final List<DatanodeDetails> excludeList = replicas.stream()
          .map(ContainerReplica::getDatanodeDetails)
          .collect(Collectors.toList());
      excludeList.addAll(replicationInFlight);
      final List<DatanodeDetails> selectedDatanodes =
          containerPlacement.chooseDatanodes(
              excludeList, null, replicasNeeded, 0, dataSizeRequired);
      if (repDelta > 0) {
        LOG.info("Container {} is under replicated. Expected replica count"
            + " is {}, but found {}.",
            id, replicationFactor, replicationFactor - repDelta);
      }
      int newMisRepDelta = misRepDelta;
      if (misRepDelta > 0) {
        LOG.info("Container: {}. {}", id, placementStatus.misReplicatedReason());
        // Check whether the new target set (original plus newly selected
        // nodes) makes the placement policy valid.
        targetReplicas.addAll(selectedDatanodes);
        newMisRepDelta = containerPlacement.validateContainerPlacement(
            targetReplicas, replicationFactor).misReplicationCount();
      }
      if (repDelta > 0 || newMisRepDelta < misRepDelta) {
        // Only send replicate commands if replicas are missing outright or
        // the newly selected nodes actually improve the mis-replication.
        for (DatanodeDetails datanode : selectedDatanodes) {
          sendReplicateCommand(container, datanode, source);
        }
      } else {
        LOG.warn("Container {} is mis-replicated, requiring {} additional "
            + "replicas. After selecting new nodes, mis-replication has not "
            + "improved. No additional replicas will be scheduled",
            id, misRepDelta);
      }
    } else {
      LOG.warn("Cannot replicate container {}, no healthy replica found.",
          container.containerID());
    }
  } catch (IOException | IllegalStateException ex) {
    LOG.warn("Exception while replicating container {}.",
        container.getContainerID(), ex);
  }
}
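The core decision above is how many replicas to request: the larger of the plain replica deficit (repDelta, from ContainerReplicaCount) and the rack deficit the PlacementPolicy reports for the in-flight target set (misRepDelta). A small standalone sketch of that arithmetic with hypothetical numbers, not taken from the Ozone code:

public final class ReplicasNeededSketch {
  public static void main(String[] args) {
    // Hypothetical state for a THREE-way replicated container:
    int replicationFactor = 3;
    int healthyReplicas = 2;   // replicas left after dropping unhealthy and pending-delete ones
    int inflightAdds = 0;      // replications already scheduled

    // Roughly what ContainerReplicaCount#additionalReplicaNeeded would report.
    int repDelta = replicationFactor - healthyReplicas - inflightAdds;

    // What validateContainerPlacement(...).misReplicationCount() might report
    // if both surviving replicas sit on the same rack while two racks are required.
    int misRepDelta = 1;

    // Same choice as handleUnderReplicatedContainer: take the larger deficit,
    // since one well-placed new replica can fix both problems at once.
    int replicasNeeded = Math.max(repDelta, misRepDelta);
    System.out.println("replicasNeeded = " + replicasNeeded);  // prints 1
  }
}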