use of org.apache.hadoop.hdds.scm.ha.SCMHAManager in project ozone by apache.
the class TestReconContainerManager method testAddNewClosedContainer.
@Test
public void testAddNewClosedContainer() throws IOException {
  ContainerWithPipeline containerWithPipeline = getTestContainer(CLOSED);
  ContainerID containerID = containerWithPipeline.getContainerInfo().containerID();
  ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
  ReconContainerManager containerManager = getContainerManager();
  assertFalse(containerManager.containerExist(containerID));
  assertFalse(getContainerTable().isExist(containerID));
  containerManager.addNewContainer(containerWithPipeline);
  assertTrue(containerManager.containerExist(containerID));
  List<ContainerInfo> containers = containerManager.getContainers(CLOSED);
  assertEquals(1, containers.size());
  assertEquals(containerInfo, containers.get(0));
  // Verify container DB.
  SCMHAManager scmhaManager = containerManager.getSCMHAManager();
  scmhaManager.getDBTransactionBuffer().close();
  assertTrue(getContainerTable().isExist(containerID));
}
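The point of the last three lines is that addNewContainer updates the manager's in-memory state immediately, while the RocksDB-backed container table only sees the entry once the SCMHAManager's DB transaction buffer is flushed. A minimal sketch of that pattern on its own, reusing only calls from the snippet above (assertPersistedAfterFlush is a hypothetical helper name; getContainerManager() and getContainerTable() are assumed to be the same test fixtures used there):

// Hypothetical helper: illustrates the flush-then-verify pattern shown above.
private void assertPersistedAfterFlush(ContainerWithPipeline cwp) throws IOException {
  ContainerID id = cwp.getContainerInfo().containerID();
  ReconContainerManager manager = getContainerManager();
  manager.addNewContainer(cwp);
  // In-memory state is visible immediately...
  assertTrue(manager.containerExist(id));
  // ...but the container table is only written once the HA transaction buffer flushes.
  manager.getSCMHAManager().getDBTransactionBuffer().close();
  assertTrue(getContainerTable().isExist(id));
}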
use of org.apache.hadoop.hdds.scm.ha.SCMHAManager in project ozone by apache.
the class TestRatisPipelineProvider method init.
public void init(int maxPipelinePerNode, OzoneConfiguration conf) throws Exception {
  testDir = GenericTestUtils.getTestDir(
      TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
  nodeManager = new MockNodeManager(true, 10);
  nodeManager.setNumPipelinePerDatanode(maxPipelinePerNode);
  SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
  conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, maxPipelinePerNode);
  stateManager = PipelineStateManagerImpl.newBuilder()
      .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore))
      .setRatisServer(scmhaManager.getRatisServer())
      .setNodeManager(nodeManager)
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
  provider = new MockRatisPipelineProvider(nodeManager, stateManager, conf);
}
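This init, like the other setups on this page, creates a real RocksDB store under a per-test directory, so a matching teardown is needed to avoid leaking the DB handle and the directory between runs. A minimal sketch, assuming dbStore is closeable and that hadoop-common's org.apache.hadoop.fs.FileUtil is on the classpath; the actual test classes may clean up differently:

@After
public void cleanup() throws Exception {
  if (dbStore != null) {
    // Close the RocksDB store created by DBStoreBuilder in init().
    dbStore.close();
  }
  // Remove the per-test metadata directory (assumes hadoop-common's FileUtil).
  FileUtil.fullyDelete(testDir);
}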
use of org.apache.hadoop.hdds.scm.ha.SCMHAManager in project ozone by apache.
the class TestPipelineStateManagerImpl method init.
@Before
public void init() throws Exception {
  final OzoneConfiguration conf = SCMTestUtils.getConf();
  testDir = GenericTestUtils.getTestDir(
      TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
  SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
  NodeManager nodeManager = new MockNodeManager(true, 10);
  stateManager = PipelineStateManagerImpl.newBuilder()
      .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore))
      .setRatisServer(scmhaManager.getRatisServer())
      .setNodeManager(nodeManager)
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
}
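With the state manager wired up this way, pipelines are registered in their protobuf form so that the call can pass through the (mock) Ratis server and the SCM DB transaction buffer. A short hedged fragment of how a test built on this init might add one pipeline, assuming a MockRatisPipelineProvider like the one used in the other snippets and that nodeManager and conf are kept reachable from the test body:

// Hypothetical test fragment: create a Ratis/THREE pipeline and register it.
PipelineProvider provider =
    new MockRatisPipelineProvider(nodeManager, stateManager, conf);
Pipeline pipeline = provider.create(
    RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
// The state manager accepts the protobuf form, mirroring the replicated code path.
stateManager.addPipeline(pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION));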
use of org.apache.hadoop.hdds.scm.ha.SCMHAManager in project ozone by apache.
the class TestReplicationManager method setup.
@Before
public void setup() throws IOException, InterruptedException,
    NodeNotFoundException, InvalidStateTransitionException {
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
      0, TimeUnit.SECONDS);
  scmLogs = GenericTestUtils.LogCapturer.captureLogs(ReplicationManager.LOG);
  containerManager = Mockito.mock(ContainerManager.class);
  nodeManager = new SimpleMockNodeManager();
  eventQueue = new EventQueue();
  scmhaManager = MockSCMHAManager.getInstance(true);
  testDir = GenericTestUtils.getTestDir(
      TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
  dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
  pipelineManager = Mockito.mock(PipelineManager.class);
  when(pipelineManager.containsPipeline(Mockito.any(PipelineID.class)))
      .thenReturn(true);
  containerStateManager = ContainerStateManagerImpl.newBuilder()
      .setConfiguration(conf)
      .setPipelineManager(pipelineManager)
      .setRatisServer(scmhaManager.getRatisServer())
      .setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore))
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
  serviceManager = new SCMServiceManager();
  datanodeCommandHandler = new DatanodeCommandHandler();
  eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler);
  Mockito.when(containerManager.getContainers()).thenAnswer(invocation -> {
    Set<ContainerID> ids = containerStateManager.getContainerIDs();
    List<ContainerInfo> containers = new ArrayList<>();
    for (ContainerID id : ids) {
      containers.add(containerStateManager.getContainer(id));
    }
    return containers;
  });
  Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
      .thenAnswer(invocation -> containerStateManager.getContainer(
          (ContainerID) invocation.getArguments()[0]));
  Mockito.when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class)))
      .thenAnswer(invocation -> containerStateManager.getContainerReplicas(
          (ContainerID) invocation.getArguments()[0]));
  containerPlacementPolicy = Mockito.mock(PlacementPolicy.class);
  Mockito.when(containerPlacementPolicy.chooseDatanodes(Mockito.any(),
      Mockito.any(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong()))
      .thenAnswer(invocation -> {
        int count = (int) invocation.getArguments()[2];
        return IntStream.range(0, count)
            .mapToObj(i -> randomDatanodeDetails())
            .collect(Collectors.toList());
      });
  Mockito.when(containerPlacementPolicy.validateContainerPlacement(
      Mockito.any(), Mockito.anyInt()))
      .thenAnswer(invocation -> new ContainerPlacementStatusDefault(2, 2, 3));
  clock = new TestClock(Instant.now(), ZoneId.of("UTC"));
  createReplicationManager(new ReplicationManagerConfiguration());
}
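The three containerManager stubs above simply delegate to containerStateManager, so the mock always reflects whatever containers the test later injects into the state manager. As a side note, the getContainers() answer can be written more compactly; a functionally equivalent sketch, assuming getContainer(ContainerID) on this state manager does not declare a checked exception:

// Equivalent to the loop-based answer above: mirror the state manager's contents.
Mockito.when(containerManager.getContainers()).thenAnswer(invocation ->
    containerStateManager.getContainerIDs().stream()
        .map(containerStateManager::getContainer)
        .collect(Collectors.toList()));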
use of org.apache.hadoop.hdds.scm.ha.SCMHAManager in project ozone by apache.
the class TestPipelineDatanodesIntersection method testPipelineDatanodesIntersection.
@Test
public void testPipelineDatanodesIntersection() throws IOException {
  NodeManager nodeManager = new MockNodeManager(true, nodeCount);
  conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
  conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
  SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
  PipelineStateManager stateManager = PipelineStateManagerImpl.newBuilder()
      .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore))
      .setRatisServer(scmhaManager.getRatisServer())
      .setNodeManager(nodeManager)
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
  PipelineProvider provider =
      new MockRatisPipelineProvider(nodeManager, stateManager, conf);
  int healthyNodeCount = nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
  int intersectionCount = 0;
  int createdPipelineCount = 0;
  while (!end && createdPipelineCount <= healthyNodeCount * nodeHeaviness) {
    try {
      Pipeline pipeline = provider.create(
          RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
      HddsProtos.Pipeline pipelineProto =
          pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION);
      stateManager.addPipeline(pipelineProto);
      nodeManager.addPipeline(pipeline);
      List<Pipeline> overlapPipelines = RatisPipelineUtils
          .checkPipelineContainSameDatanodes(stateManager, pipeline);
      if (!overlapPipelines.isEmpty()) {
        intersectionCount++;
        for (Pipeline overlapPipeline : overlapPipelines) {
          LOG.info("Pipeline " + pipeline.getId() + " overlaps with previous pipeline "
              + overlapPipeline.getId() + ". They share the same set of datanodes: "
              + pipeline.getNodesInOrder().get(0).getUuid() + "/"
              + pipeline.getNodesInOrder().get(1).getUuid() + "/"
              + pipeline.getNodesInOrder().get(2).getUuid() + " and "
              + overlapPipeline.getNodesInOrder().get(0).getUuid() + "/"
              + overlapPipeline.getNodesInOrder().get(1).getUuid() + "/"
              + overlapPipeline.getNodesInOrder().get(2).getUuid() + ".");
        }
      }
      createdPipelineCount++;
    } catch (SCMException e) {
      end = true;
    } catch (IOException e) {
      end = true;
      // Should not throw regular IOException.
      Assert.fail();
    }
  }
  end = false;
  LOG.info("Among a total of " + stateManager.getPipelines(
      RatisReplicationConfig.getInstance(ReplicationFactor.THREE)).size()
      + " created pipelines, with " + healthyNodeCount + " healthy datanodes and a node"
      + " heaviness of " + nodeHeaviness + ", " + intersectionCount
      + " pipelines have the same set of datanodes.");
}
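The overlap log message is easier to maintain if its formatting is pulled out of the loop; a hypothetical helper producing the same information with String.format (the name describeOverlap and its placement are illustrative only):

// Hypothetical helper: formats the overlap message logged in the loop above.
private static String describeOverlap(Pipeline pipeline, Pipeline overlapPipeline) {
  return String.format(
      "Pipeline %s overlaps with previous pipeline %s. Shared datanodes: %s/%s/%s and %s/%s/%s.",
      pipeline.getId(), overlapPipeline.getId(),
      pipeline.getNodesInOrder().get(0).getUuid(),
      pipeline.getNodesInOrder().get(1).getUuid(),
      pipeline.getNodesInOrder().get(2).getUuid(),
      overlapPipeline.getNodesInOrder().get(0).getUuid(),
      overlapPipeline.getNodesInOrder().get(1).getUuid(),
      overlapPipeline.getNodesInOrder().get(2).getUuid());
}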