use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project ozone by apache.
the class TestReconAsPassiveScm method testDatanodeRegistrationAndReports.
@Test
public void testDatanodeRegistrationAndReports() throws Exception {
  ReconStorageContainerManagerFacade reconScm =
      (ReconStorageContainerManagerFacade) cluster.getReconServer()
          .getReconStorageContainerManager();
  StorageContainerManager scm = cluster.getStorageContainerManager();
  PipelineManager reconPipelineManager = reconScm.getPipelineManager();
  PipelineManager scmPipelineManager = scm.getPipelineManager();
  LambdaTestUtils.await(60000, 5000,
      () -> (reconPipelineManager.getPipelines().size() >= 4));
  // Verify if Recon has all the pipelines from SCM.
  scmPipelineManager.getPipelines().forEach(p -> {
    try {
      assertNotNull(reconPipelineManager.getPipeline(p.getId()));
    } catch (PipelineNotFoundException e) {
      Assert.fail();
    }
  });
  // Verify we can never create a pipeline in Recon.
  LambdaTestUtils.intercept(UnsupportedOperationException.class,
      "Trying to create pipeline in Recon, which is prohibited!",
      () -> reconPipelineManager.createPipeline(
          RatisReplicationConfig.getInstance(ONE)));
  ContainerManager scmContainerManager = scm.getContainerManager();
  assertTrue(scmContainerManager.getContainers().isEmpty());
  // Verify if all the 3 nodes are registered with Recon.
  NodeManager reconNodeManager = reconScm.getScmNodeManager();
  NodeManager scmNodeManager = scm.getScmNodeManager();
  assertEquals(scmNodeManager.getAllNodes().size(),
      reconNodeManager.getAllNodes().size());
  // Create container
  ContainerManager reconContainerManager = reconScm.getContainerManager();
  ContainerInfo containerInfo = scmContainerManager.allocateContainer(
      RatisReplicationConfig.getInstance(ONE), "test");
  long containerID = containerInfo.getContainerID();
  Pipeline pipeline =
      scmPipelineManager.getPipeline(containerInfo.getPipelineID());
  XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
  runTestOzoneContainerViaDataNode(containerID, client);
  // Verify Recon picked up the new container that was created.
  assertEquals(scmContainerManager.getContainerIDs(),
      reconContainerManager.getContainerIDs());
  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(ReconNodeManager.LOG);
  GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG);
  reconScm.getEventQueue().fireEvent(CLOSE_CONTAINER,
      containerInfo.containerID());
  GenericTestUtils.waitFor(() -> logCapturer.getOutput()
      .contains("Ignoring unsupported command closeContainerCommand"),
      1000, 20000);
}
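The test above compares SCM's and Recon's container-ID sets directly. A minimal hedged sketch of the same check written as a poll (useful when Recon may lag behind SCM), built only from the calls already shown; the helper name waitForReconContainerSync is hypothetical and not part of the test class:

// Hypothetical helper, not part of TestReconAsPassiveScm: poll until the Recon
// container manager reports the same container IDs as SCM.
private static void waitForReconContainerSync(
    ContainerManager scmContainerManager,
    ContainerManager reconContainerManager) throws Exception {
  LambdaTestUtils.await(60000, 5000,
      () -> scmContainerManager.getContainerIDs()
          .equals(reconContainerManager.getContainerIDs()));
}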
use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project ozone by apache.
the class TestReconAsPassiveScm method testReconRestart.
@Test
public void testReconRestart() throws Exception {
  final OzoneStorageContainerManager reconScm =
      cluster.getReconServer().getReconStorageContainerManager();
  StorageContainerManager scm = cluster.getStorageContainerManager();
  ContainerManager scmContainerManager = scm.getContainerManager();
  assertTrue(scmContainerManager.getContainers().isEmpty());
  ContainerManager reconContainerManager = reconScm.getContainerManager();
  assertTrue(reconContainerManager.getContainers().isEmpty());
  LambdaTestUtils.await(60000, 5000,
      () -> (reconScm.getScmNodeManager().getAllNodes().size() == 3));
  // Stop Recon
  cluster.stopRecon();
  // Create container in SCM.
  ContainerInfo containerInfo = scmContainerManager.allocateContainer(
      RatisReplicationConfig.getInstance(ONE), "test");
  long containerID = containerInfo.getContainerID();
  PipelineManager scmPipelineManager = scm.getPipelineManager();
  Pipeline pipeline =
      scmPipelineManager.getPipeline(containerInfo.getPipelineID());
  XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
  runTestOzoneContainerViaDataNode(containerID, client);
  assertFalse(scmContainerManager.getContainers().isEmpty());
  // Close a pipeline
  Optional<Pipeline> pipelineToClose = scmPipelineManager
      .getPipelines(RatisReplicationConfig.getInstance(ONE))
      .stream()
      .filter(p -> !p.getId().equals(containerInfo.getPipelineID()))
      .findFirst();
  assertTrue(pipelineToClose.isPresent());
  scmPipelineManager.closePipeline(pipelineToClose.get(), false);
  // Start Recon
  cluster.startRecon();
  // Verify Recon has all the nodes on restart (even if heartbeats are
  // not yet received).
  NodeManager reconNodeManager = reconScm.getScmNodeManager();
  NodeManager scmNodeManager = scm.getScmNodeManager();
  assertEquals(scmNodeManager.getAllNodes().size(),
      reconNodeManager.getAllNodes().size());
  // Verify Recon picks up the new container and the pipeline-close action
  // from SCM.
  OzoneStorageContainerManager newReconScm =
      cluster.getReconServer().getReconStorageContainerManager();
  PipelineManager reconPipelineManager = newReconScm.getPipelineManager();
  assertFalse(
      reconPipelineManager.containsPipeline(pipelineToClose.get().getId()));
  LambdaTestUtils.await(90000, 5000,
      () -> (newReconScm.getContainerManager()
          .containerExist(ContainerID.valueOf(containerID))));
}
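Condensed, the ContainerInfo usage pattern shared by both tests above is: allocate a container on SCM, read its numeric ID, and resolve its pipeline from the pipeline ID carried on the ContainerInfo. A minimal sketch, reusing only the calls and fields already shown (variable names are illustrative):

// Sketch of the shared pattern: ContainerInfo links a container to its pipeline.
ContainerInfo info = scmContainerManager.allocateContainer(
    RatisReplicationConfig.getInstance(ONE), "test");
long id = info.getContainerID();                    // numeric container ID
Pipeline pipeline = scmPipelineManager.getPipeline(info.getPipelineID());
XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);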
use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project ozone by apache.
the class TestReconScmSnapshot method testSnapshot.
public static void testSnapshot(MiniOzoneCluster cluster) throws Exception {
  GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
      .captureLogs(LoggerFactory.getLogger(
          ReconStorageContainerManagerFacade.class));
  List<ContainerInfo> reconContainers = cluster.getReconServer()
      .getReconStorageContainerManager().getContainerManager().getContainers();
  assertEquals(0, reconContainers.size());
  ReconNodeManager nodeManager;
  nodeManager = (ReconNodeManager) cluster.getReconServer()
      .getReconStorageContainerManager().getScmNodeManager();
  long keyCountBefore = nodeManager.getNodeDBKeyCount();
  // Stop Recon so that containers can be added to SCM while it is down.
  cluster.stopRecon();
  ContainerManager containerManager;
  containerManager = cluster.getStorageContainerManager().getContainerManager();
  for (int i = 0; i < 10; i++) {
    containerManager.allocateContainer(
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
        "testOwner");
  }
  cluster.startRecon();
  // Container count after the Recon DB is updated from the SCM DB.
  containerManager = cluster.getStorageContainerManager().getContainerManager();
  ContainerManager reconContainerManager = cluster.getReconServer()
      .getReconStorageContainerManager().getContainerManager();
  assertTrue(logCapturer.getOutput().contains(
      "Recon Container Count: " + reconContainers.size()
          + ", SCM Container Count: "
          + containerManager.getContainers().size()));
  assertEquals(containerManager.getContainers().size(),
      reconContainerManager.getContainers().size());
  // Pipeline count after the Recon DB is updated from the SCM DB.
  PipelineManager scmPipelineManager =
      cluster.getStorageContainerManager().getPipelineManager();
  PipelineManager reconPipelineManager = cluster.getReconServer()
      .getReconStorageContainerManager().getPipelineManager();
  assertEquals(scmPipelineManager.getPipelines().size(),
      reconPipelineManager.getPipelines().size());
  // Node count after the Recon DB is updated from the SCM DB.
  nodeManager = (ReconNodeManager) cluster.getReconServer()
      .getReconStorageContainerManager().getScmNodeManager();
  long keyCountAfter = nodeManager.getNodeDBKeyCount();
  assertEquals(keyCountAfter, keyCountBefore);
}
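The allocation loop above is the only part of testSnapshot that touches SCM while Recon is down. A hedged sketch of the same loop extracted into a batch helper; the name allocateContainers is hypothetical, introduced here purely for illustration:

// Hypothetical helper: allocate a batch of RATIS/ONE containers on SCM,
// mirroring the loop in testSnapshot above.
private static void allocateContainers(ContainerManager containerManager,
    int count) throws Exception {
  for (int i = 0; i < count; i++) {
    containerManager.allocateContainer(
        RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE),
        "testOwner");
  }
}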
use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project ozone by apache.
the class TestDecommissionAndMaintenance method waitForAndReturnContainer.
/**
 * Get any container present in the cluster and wait to ensure 3 replicas
 * have been reported before returning the container.
 * @return A single container present on the cluster
 * @throws Exception
 */
private ContainerInfo waitForAndReturnContainer() throws Exception {
  final ContainerInfo container = cm.getContainers().get(0);
  // Ensure all 3 replicas of the container have been reported via ICR
  waitForContainerReplicas(container, 3);
  return container;
}
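waitForContainerReplicas itself is not shown in this listing. A minimal sketch of what such a wait could look like inside the same test class, assuming the cm container manager field used above and the LambdaTestUtils polling utility used elsewhere on this page; the real helper in TestDecommissionAndMaintenance may differ:

// Illustrative sketch only: poll until the expected number of replicas has
// been reported for the container.
private void waitForContainerReplicas(ContainerInfo container,
    int expectedCount) throws Exception {
  LambdaTestUtils.await(30000, 1000,
      () -> cm.getContainerReplicas(container.containerID()).size()
          == expectedCount);
}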
use of org.apache.hadoop.hdds.scm.container.ContainerInfo in project ozone by apache.
the class TestDecommissionAndMaintenance method testContainerIsReplicatedWhenAllNodesGotoMaintenance.
@Test
// Once the maintenance nodes return, the excess replicas should be removed.
public void testContainerIsReplicatedWhenAllNodesGotoMaintenance()
    throws Exception {
  // Generate some data on the empty cluster to create some containers
  generateData(20, "key", ReplicationFactor.THREE, ReplicationType.RATIS);
  // Locate any container and find its replicas
  final ContainerInfo container = waitForAndReturnContainer();
  Set<ContainerReplica> replicas = getContainerReplicas(container);
  List<DatanodeDetails> forMaintenance = new ArrayList<>();
  replicas.forEach(r -> forMaintenance.add(r.getDatanodeDetails()));
  scmClient.startMaintenanceNodes(forMaintenance.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList()), 0);
  // Ensure all 3 DNs go to maintenance
  for (DatanodeDetails dn : forMaintenance) {
    waitForDnToReachPersistedOpState(dn, IN_MAINTENANCE);
  }
  // There should now be 5-6 replicas of the container we are tracking
  Set<ContainerReplica> newReplicas =
      cm.getContainerReplicas(container.containerID());
  assertTrue(newReplicas.size() >= 5);
  scmClient.recommissionNodes(forMaintenance.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList()));
  // Ensure all 3 DNs return to IN_SERVICE
  for (DatanodeDetails dn : forMaintenance) {
    waitForDnToReachOpState(dn, IN_SERVICE);
  }
  waitForContainerReplicas(container, 3);
}
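The host-and-port mapping is repeated for both the maintenance and recommission calls above. A hedged sketch of pulling it into a small convenience method; the name toHostAndPorts is hypothetical, and getDNHostAndPort is assumed to be the same helper the test already calls:

// Hypothetical convenience method mirroring the inline stream used twice above.
private List<String> toHostAndPorts(List<DatanodeDetails> dns) {
  return dns.stream()
      .map(d -> getDNHostAndPort(d))
      .collect(Collectors.toList());
}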