Use of org.apache.hadoop.hdds.scm.pipeline.PipelineManager in project ozone by apache.
From the class TestReconAsPassiveScm, the method testDatanodeRegistrationAndReports, which verifies that Recon, running as a passive SCM, mirrors SCM's pipelines, containers, and registered datanodes, refuses to create pipelines, and ignores unsupported commands such as closeContainerCommand:
@Test
public void testDatanodeRegistrationAndReports() throws Exception {
  ReconStorageContainerManagerFacade reconScm = (ReconStorageContainerManagerFacade)
      cluster.getReconServer().getReconStorageContainerManager();
  StorageContainerManager scm = cluster.getStorageContainerManager();
  PipelineManager reconPipelineManager = reconScm.getPipelineManager();
  PipelineManager scmPipelineManager = scm.getPipelineManager();

  LambdaTestUtils.await(60000, 5000,
      () -> (reconPipelineManager.getPipelines().size() >= 4));

  // Verify that Recon has all the pipelines from SCM.
  scmPipelineManager.getPipelines().forEach(p -> {
    try {
      assertNotNull(reconPipelineManager.getPipeline(p.getId()));
    } catch (PipelineNotFoundException e) {
      Assert.fail();
    }
  });

  // Verify that we can never create a pipeline in Recon.
  LambdaTestUtils.intercept(UnsupportedOperationException.class,
      "Trying to create pipeline in Recon, which is prohibited!",
      () -> reconPipelineManager.createPipeline(RatisReplicationConfig.getInstance(ONE)));

  ContainerManager scmContainerManager = scm.getContainerManager();
  assertTrue(scmContainerManager.getContainers().isEmpty());

  // Verify that all 3 nodes are registered with Recon.
  NodeManager reconNodeManager = reconScm.getScmNodeManager();
  NodeManager scmNodeManager = scm.getScmNodeManager();
  assertEquals(scmNodeManager.getAllNodes().size(), reconNodeManager.getAllNodes().size());

  // Create a container.
  ContainerManager reconContainerManager = reconScm.getContainerManager();
  ContainerInfo containerInfo =
      scmContainerManager.allocateContainer(RatisReplicationConfig.getInstance(ONE), "test");
  long containerID = containerInfo.getContainerID();
  Pipeline pipeline = scmPipelineManager.getPipeline(containerInfo.getPipelineID());
  XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
  runTestOzoneContainerViaDataNode(containerID, client);

  // Verify that Recon picked up the new container that was created.
  assertEquals(scmContainerManager.getContainerIDs(), reconContainerManager.getContainerIDs());

  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(ReconNodeManager.LOG);
  GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG);
  reconScm.getEventQueue().fireEvent(CLOSE_CONTAINER, containerInfo.containerID());
  GenericTestUtils.waitFor(
      () -> logCapturer.getOutput().contains("Ignoring unsupported command closeContainerCommand"),
      1000, 20000);
}
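A condensed sketch of just the PipelineManager calls exercised above, reusing the scmPipelineManager and reconPipelineManager handles from the test and assuming an enclosing method that declares throws Exception, as the test does; it is illustrative rather than a drop-in replacement for the test body.

  // For every pipeline SCM knows about, Recon should be able to resolve the same id.
  for (Pipeline p : scmPipelineManager.getPipelines()) {
    // getPipeline(PipelineID) throws PipelineNotFoundException if the id is unknown.
    assertNotNull(reconPipelineManager.getPipeline(p.getId()));
  }
  // Recon's PipelineManager is read-only: createPipeline is expected to throw the
  // UnsupportedOperationException asserted in the test above.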
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineManager in project ozone by apache.
From the class TestReconAsPassiveScm, the method testReconRestart, which verifies that after a restart Recon catches up with containers created and pipelines closed in SCM while it was down:
@Test
public void testReconRestart() throws Exception {
  final OzoneStorageContainerManager reconScm =
      cluster.getReconServer().getReconStorageContainerManager();
  StorageContainerManager scm = cluster.getStorageContainerManager();

  ContainerManager scmContainerManager = scm.getContainerManager();
  assertTrue(scmContainerManager.getContainers().isEmpty());
  ContainerManager reconContainerManager = reconScm.getContainerManager();
  assertTrue(reconContainerManager.getContainers().isEmpty());
  LambdaTestUtils.await(60000, 5000,
      () -> (reconScm.getScmNodeManager().getAllNodes().size() == 3));

  // Stop Recon.
  cluster.stopRecon();

  // Create a container in SCM.
  ContainerInfo containerInfo =
      scmContainerManager.allocateContainer(RatisReplicationConfig.getInstance(ONE), "test");
  long containerID = containerInfo.getContainerID();
  PipelineManager scmPipelineManager = scm.getPipelineManager();
  Pipeline pipeline = scmPipelineManager.getPipeline(containerInfo.getPipelineID());
  XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
  runTestOzoneContainerViaDataNode(containerID, client);
  assertFalse(scmContainerManager.getContainers().isEmpty());

  // Close a pipeline.
  Optional<Pipeline> pipelineToClose =
      scmPipelineManager.getPipelines(RatisReplicationConfig.getInstance(ONE)).stream()
          .filter(p -> !p.getId().equals(containerInfo.getPipelineID()))
          .findFirst();
  assertTrue(pipelineToClose.isPresent());
  scmPipelineManager.closePipeline(pipelineToClose.get(), false);

  // Start Recon.
  cluster.startRecon();

  // Verify that Recon has all the nodes on restart (even if heartbeats are
  // not yet received).
  NodeManager reconNodeManager = reconScm.getScmNodeManager();
  NodeManager scmNodeManager = scm.getScmNodeManager();
  assertEquals(scmNodeManager.getAllNodes().size(), reconNodeManager.getAllNodes().size());

  // Verify that Recon picks up the new container and the close-pipeline SCM actions.
  OzoneStorageContainerManager newReconScm =
      cluster.getReconServer().getReconStorageContainerManager();
  PipelineManager reconPipelineManager = newReconScm.getPipelineManager();
  assertFalse(reconPipelineManager.containsPipeline(pipelineToClose.get().getId()));
  LambdaTestUtils.await(90000, 5000,
      () -> (newReconScm.getContainerManager().containerExist(ContainerID.valueOf(containerID))));
}
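The close-and-resync interaction above distills to the following sketch, reusing scmPipelineManager, reconPipelineManager, and containerInfo from the test; it assumes at least two RATIS/ONE pipelines exist (the test asserts this with pipelineToClose.isPresent()) and an enclosing method that declares throws Exception.

  // Select any RATIS/ONE pipeline that does not back the freshly created container ...
  Optional<Pipeline> candidate =
      scmPipelineManager.getPipelines(RatisReplicationConfig.getInstance(ONE)).stream()
          .filter(p -> !p.getId().equals(containerInfo.getPipelineID()))
          .findFirst();
  // ... close it in SCM while Recon is down ...
  scmPipelineManager.closePipeline(candidate.get(), false);
  // ... and after Recon restarts, the closed pipeline should no longer be tracked there.
  assertFalse(reconPipelineManager.containsPipeline(candidate.get().getId()));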
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineManager in project ozone by apache.
From the class TestReconScmSnapshot, the method testSnapshot, which verifies that Recon's container, pipeline, and node counts match SCM's after Recon's DB is updated from the SCM DB:
public static void testSnapshot(MiniOzoneCluster cluster) throws Exception {
  GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer.captureLogs(
      LoggerFactory.getLogger(ReconStorageContainerManagerFacade.class));
  List<ContainerInfo> reconContainers = cluster.getReconServer()
      .getReconStorageContainerManager().getContainerManager().getContainers();
  assertEquals(0, reconContainers.size());

  ReconNodeManager nodeManager;
  nodeManager = (ReconNodeManager) cluster.getReconServer()
      .getReconStorageContainerManager().getScmNodeManager();
  long keyCountBefore = nodeManager.getNodeDBKeyCount();

  // Stop Recon so that containers can be added in SCM while it is down.
  cluster.stopRecon();
  ContainerManager containerManager;
  containerManager = cluster.getStorageContainerManager().getContainerManager();
  for (int i = 0; i < 10; i++) {
    containerManager.allocateContainer(
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE), "testOwner");
  }
  cluster.startRecon();

  // Container count after the Recon DB is updated from the SCM DB.
  containerManager = cluster.getStorageContainerManager().getContainerManager();
  ContainerManager reconContainerManager = cluster.getReconServer()
      .getReconStorageContainerManager().getContainerManager();
  assertTrue(logCapturer.getOutput().contains("Recon Container Count: " + reconContainers.size()
      + ", SCM Container Count: " + containerManager.getContainers().size()));
  assertEquals(containerManager.getContainers().size(),
      reconContainerManager.getContainers().size());

  // Pipeline count after the Recon DB is updated from the SCM DB.
  PipelineManager scmPipelineManager = cluster.getStorageContainerManager().getPipelineManager();
  PipelineManager reconPipelineManager = cluster.getReconServer()
      .getReconStorageContainerManager().getPipelineManager();
  assertEquals(scmPipelineManager.getPipelines().size(), reconPipelineManager.getPipelines().size());

  // Node count after the Recon DB is updated from the SCM DB.
  nodeManager = (ReconNodeManager) cluster.getReconServer()
      .getReconStorageContainerManager().getScmNodeManager();
  long keyCountAfter = nodeManager.getNodeDBKeyCount();
  assertEquals(keyCountAfter, keyCountBefore);
}
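The count comparisons above assume the snapshot sync has completed by the time startRecon() returns; if that timing were not guaranteed in a given environment, the same pipeline-count check could be guarded with the LambdaTestUtils.await pattern used elsewhere on this page. A sketch, not part of the original test, reusing scmPipelineManager and reconPipelineManager:

  // Wait up to 60s, polling every 5s, until Recon's pipeline count matches SCM's.
  LambdaTestUtils.await(60000, 5000, () ->
      scmPipelineManager.getPipelines().size() == reconPipelineManager.getPipelines().size());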
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineManager in project ozone by apache.
From the class TestFreonWithPipelineDestroy, the method destroyPipeline, which closes the pipeline reported by the first datanode through SCM's PipelineManager:
private void destroyPipeline() throws Exception {
  // Pick the first pipeline reported by the first datanode's write channel.
  XceiverServerSpi server = cluster.getHddsDatanodes().get(0)
      .getDatanodeStateMachine().getContainer().getWriteChannel();
  StorageContainerDatanodeProtocolProtos.PipelineReport report =
      server.getPipelineReport().get(0);
  PipelineID id = PipelineID.getFromProtobuf(report.getPipelineID());
  // Resolve and close that pipeline through SCM's PipelineManager.
  PipelineManager pipelineManager = cluster.getStorageContainerManager().getPipelineManager();
  Pipeline pipeline = pipelineManager.getPipeline(id);
  pipelineManager.closePipeline(pipeline, false);
}
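A hedged variant of the same close-by-id pattern that tolerates the pipeline already being gone, mirroring how getPeerList below handles missing pipelines; pipelineManager and id are the variables from destroyPipeline above, and, as there, the enclosing method is assumed to declare throws Exception.

  try {
    Pipeline pipeline = pipelineManager.getPipeline(id);
    pipelineManager.closePipeline(pipeline, false);
  } catch (PipelineNotFoundException pnfe) {
    // The pipeline was already closed and removed; nothing left to do.
  }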
Use of org.apache.hadoop.hdds.scm.pipeline.PipelineManager in project ozone by apache.
From the class SCMNodeManager, the method getPeerList, which collects the datanodes that share a pipeline with a given datanode:
@Override
public Collection<DatanodeDetails> getPeerList(DatanodeDetails dn) {
  HashSet<DatanodeDetails> dns = new HashSet<>();
  Preconditions.checkNotNull(dn);
  Set<PipelineID> pipelines = nodeStateManager.getPipelineByDnID(dn.getUuid());
  PipelineManager pipelineManager = scmContext.getScm().getPipelineManager();
  if (!pipelines.isEmpty()) {
    pipelines.forEach(id -> {
      try {
        Pipeline pipeline = pipelineManager.getPipeline(id);
        List<DatanodeDetails> peers = pipeline.getNodes();
        dns.addAll(peers);
      } catch (PipelineNotFoundException pnfe) {
        // Ignore the pipeline-not-found exception here.
      }
    });
  }
  // Remove the node itself from the peer set.
  dns.remove(dn);
  return dns;
}
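A brief usage sketch for getPeerList, assuming a NodeManager handle such as the scmNodeManager used in the tests above; the returned collection contains every datanode that shares at least one pipeline with dn, and never dn itself.

  Collection<DatanodeDetails> peers = scmNodeManager.getPeerList(dn);
  for (DatanodeDetails peer : peers) {
    // Each peer shares at least one pipeline with dn.
    System.out.println("Pipeline peer of " + dn + ": " + peer);
  }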