Example 1 with SCMNodeManager

Use of org.apache.hadoop.hdds.scm.node.SCMNodeManager in the Apache Ozone project.

From class TestIncrementalContainerReportHandler, method setup():

@Before
public void setup() throws IOException, InvalidStateTransitionException {
    final OzoneConfiguration conf = new OzoneConfiguration();
    final String path = GenericTestUtils.getTempPath(UUID.randomUUID().toString());
    Path scmPath = Paths.get(path, "scm-meta");
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
    this.containerManager = Mockito.mock(ContainerManager.class);
    NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
    EventQueue eventQueue = new EventQueue();
    SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
    this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
    Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    this.nodeManager = new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap, scmContext, versionManager);
    scmhaManager = MockSCMHAManager.getInstance(true);
    testDir = GenericTestUtils.getTestDir(TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
    dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
    pipelineManager = new MockPipelineManager(dbStore, scmhaManager, nodeManager);
    this.containerStateManager = ContainerStateManagerImpl.newBuilder()
            .setConfiguration(conf)
            .setPipelineManager(pipelineManager)
            .setRatisServer(scmhaManager.getRatisServer())
            .setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore))
            .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
            .build();
    this.publisher = Mockito.mock(EventPublisher.class);
    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
            .thenAnswer(invocation -> containerStateManager.getContainer(
                    (ContainerID) invocation.getArguments()[0]));
    Mockito.when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class)))
            .thenAnswer(invocation -> containerStateManager.getContainerReplicas(
                    (ContainerID) invocation.getArguments()[0]));
    Mockito.doAnswer(invocation -> {
        containerStateManager.removeContainerReplica(((ContainerID) invocation.getArguments()[0]), (ContainerReplica) invocation.getArguments()[1]);
        return null;
    }).when(containerManager).removeContainerReplica(Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
    Mockito.doAnswer(invocation -> {
        containerStateManager.updateContainerState(((ContainerID) invocation.getArguments()[0]).getProtobuf(), (HddsProtos.LifeCycleEvent) invocation.getArguments()[1]);
        return null;
    }).when(containerManager).updateContainerState(Mockito.any(ContainerID.class), Mockito.any(HddsProtos.LifeCycleEvent.class));
    Mockito.doAnswer(invocation -> {
        containerStateManager.updateContainerReplica(((ContainerID) invocation.getArguments()[0]), (ContainerReplica) invocation.getArguments()[1]);
        return null;
    }).when(containerManager).updateContainerReplica(Mockito.any(ContainerID.class), Mockito.any(ContainerReplica.class));
}
Also used : Path(java.nio.file.Path) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) SCMStorageConfig(org.apache.hadoop.hdds.scm.server.SCMStorageConfig) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) MockPipelineManager(org.apache.hadoop.hdds.scm.pipeline.MockPipelineManager) HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) SCMDBDefinition(org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition) Before(org.junit.Before)
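
The setup above backs a Mockito mock of ContainerManager with a real ContainerStateManagerImpl, so reads and writes on the mock operate on live state. A minimal, self-contained sketch of the same when/thenAnswer and doAnswer/when delegation pattern is shown below; the KeyValueStore interface and the HashMap standing in for the state manager are hypothetical and not part of Ozone.

import java.util.HashMap;
import java.util.Map;

import org.mockito.Mockito;

public final class DelegatingMockSketch {

    // Hypothetical interface standing in for ContainerManager in this sketch.
    interface KeyValueStore {
        String get(String key);
        void put(String key, String value);
    }

    public static void main(String[] args) {
        // Real in-memory state playing the role of ContainerStateManagerImpl.
        Map<String, String> state = new HashMap<>();
        KeyValueStore mockStore = Mockito.mock(KeyValueStore.class);

        // Reads on the mock are answered from the real state.
        Mockito.when(mockStore.get(Mockito.anyString()))
                .thenAnswer(invocation -> state.get((String) invocation.getArguments()[0]));

        // Void writes are forwarded with doAnswer(...).when(...), as in the setup above.
        Mockito.doAnswer(invocation -> {
            state.put((String) invocation.getArguments()[0],
                    (String) invocation.getArguments()[1]);
            return null;
        }).when(mockStore).put(Mockito.anyString(), Mockito.anyString());

        mockStore.put("c1", "OPEN");
        System.out.println(mockStore.get("c1")); // prints OPEN
    }
}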

Example 2 with SCMNodeManager

Use of org.apache.hadoop.hdds.scm.node.SCMNodeManager in the Apache Ozone project.

From class StorageContainerManager, method initializeSystemManagers():

/**
 * This function initializes the following managers. If the configurator
 * specifies a value, we will use it, else we will use the default value.
 *
 *  Node Manager
 *  Pipeline Manager
 *  Container Manager
 *  Block Manager
 *  Replication Manager
 *  Safe Mode Manager
 *
 * @param conf - Ozone Configuration.
 * @param configurator - A customizer which allows different managers to be
 *                    used if needed.
 * @throws IOException - on Failure.
 */
private void initializeSystemManagers(OzoneConfiguration conf, SCMConfigurator configurator) throws IOException {
    if (configurator.getNetworkTopology() != null) {
        clusterMap = configurator.getNetworkTopology();
    } else {
        clusterMap = new NetworkTopologyImpl(conf);
    }
    // This needs to be done before initializing Ratis.
    RatisDropwizardExports.registerRatisMetricReporters(ratisMetricsMap);
    if (configurator.getSCMHAManager() != null) {
        scmHAManager = configurator.getSCMHAManager();
    } else {
        scmHAManager = new SCMHAManagerImpl(conf, this);
    }
    // inline upgrade for SequenceIdGenerator
    SequenceIdGenerator.upgradeToSequenceId(scmMetadataStore);
    // Distributed sequence id generator
    sequenceIdGen = new SequenceIdGenerator(conf, scmHAManager, scmMetadataStore.getSequenceIdTable());
    if (configurator.getScmContext() != null) {
        scmContext = configurator.getScmContext();
    } else {
        // When term equals SCMContext.INVALID_TERM, the isLeader() check
        // and getTermOfLeader() will always pass.
        long term = SCMHAUtils.isSCMHAEnabled(conf) ? 0 : SCMContext.INVALID_TERM;
        // non-leader of term 0, in safe mode, preCheck not completed.
        scmContext = new SCMContext.Builder()
                .setLeader(false)
                .setTerm(term)
                .setIsInSafeMode(true)
                .setIsPreCheckComplete(false)
                .setSCM(this)
                .build();
    }
    if (configurator.getScmNodeManager() != null) {
        scmNodeManager = configurator.getScmNodeManager();
    } else {
        scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, scmContext, scmLayoutVersionManager);
    }
    placementMetrics = SCMContainerPlacementMetrics.create();
    containerPlacementPolicy = ContainerPlacementPolicyFactory.getPolicy(conf, scmNodeManager, clusterMap, true, placementMetrics);
    if (configurator.getPipelineManager() != null) {
        pipelineManager = configurator.getPipelineManager();
    } else {
        pipelineManager = PipelineManagerImpl.newPipelineManager(conf, scmHAManager,
                scmNodeManager, scmMetadataStore.getPipelineTable(), eventQueue,
                scmContext, serviceManager);
    }
    if (configurator.getContainerManager() != null) {
        containerManager = configurator.getContainerManager();
    } else {
        containerManager = new ContainerManagerImpl(conf, scmHAManager, sequenceIdGen, pipelineManager, scmMetadataStore.getContainerTable());
    }
    pipelineChoosePolicy = PipelineChoosePolicyFactory.getPolicy(conf);
    if (configurator.getWritableContainerFactory() != null) {
        writableContainerFactory = configurator.getWritableContainerFactory();
    } else {
        writableContainerFactory = new WritableContainerFactory(this);
    }
    if (configurator.getScmBlockManager() != null) {
        scmBlockManager = configurator.getScmBlockManager();
    } else {
        scmBlockManager = new BlockManagerImpl(conf, this);
    }
    if (configurator.getReplicationManager() != null) {
        replicationManager = configurator.getReplicationManager();
    } else {
        replicationManager = new ReplicationManager(conf, containerManager,
                containerPlacementPolicy, eventQueue, scmContext, serviceManager,
                scmNodeManager, new MonotonicClock(ZoneOffset.UTC), scmHAManager,
                getScmMetadataStore().getMoveTable());
    }
    if (configurator.getScmSafeModeManager() != null) {
        scmSafeModeManager = configurator.getScmSafeModeManager();
    } else {
        scmSafeModeManager = new SCMSafeModeManager(conf,
                containerManager.getContainers(), containerManager, pipelineManager,
                eventQueue, serviceManager, scmContext);
    }
    scmDecommissionManager = new NodeDecommissionManager(conf, scmNodeManager, containerManager, scmContext, eventQueue, replicationManager);
}
Also used : ReplicationManager(org.apache.hadoop.hdds.scm.container.ReplicationManager) SCMHAManagerImpl(org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl) SCMSafeModeManager(org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager) ContainerManagerImpl(org.apache.hadoop.hdds.scm.container.ContainerManagerImpl) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) SCMContext(org.apache.hadoop.hdds.scm.ha.SCMContext) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SequenceIdGenerator(org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator) WritableContainerFactory(org.apache.hadoop.hdds.scm.pipeline.WritableContainerFactory) NodeDecommissionManager(org.apache.hadoop.hdds.scm.node.NodeDecommissionManager) MonotonicClock(org.apache.hadoop.ozone.common.MonotonicClock) BlockManagerImpl(org.apache.hadoop.hdds.scm.block.BlockManagerImpl)
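
The Javadoc above spells out the fallback rule: any manager supplied through SCMConfigurator is used as-is, otherwise the default implementation is constructed. A hedged sketch of building such a configurator for a test follows; it assumes SCMConfigurator exposes setters mirroring the getters used in initializeSystemManagers (setNetworkTopology, setScmNodeManager), which is an assumption, not something shown above.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.server.SCMConfigurator;

public final class ConfiguratorSketch {

    // Overrides only the topology and node manager; initializeSystemManagers
    // falls back to its defaults for every manager left unset.
    // Setter names are assumed to mirror the getters used above.
    public static SCMConfigurator withCustomNodeManager(
            OzoneConfiguration conf, NodeManager customNodeManager) {
        NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
        SCMConfigurator configurator = new SCMConfigurator();
        configurator.setNetworkTopology(clusterMap);
        configurator.setScmNodeManager(customNodeManager);
        return configurator;
    }
}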

Example 3 with SCMNodeManager

Use of org.apache.hadoop.hdds.scm.node.SCMNodeManager in the Apache Ozone project.

From class TestSCMNodeMetrics, method setup():

@BeforeClass
public static void setup() throws Exception {
    OzoneConfiguration source = new OzoneConfiguration();
    EventQueue publisher = new EventQueue();
    SCMStorageConfig config = new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage");
    HDDSLayoutVersionManager versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
    Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    nodeManager = new SCMNodeManager(source, config, publisher, new NetworkTopologyImpl(source), SCMContext.emptyContext(), versionManager);
    registeredDatanode = DatanodeDetails.newBuilder()
            .setHostName("localhost")
            .setIpAddress("127.0.0.1")
            .setUuid(UUID.randomUUID())
            .build();
    nodeManager.register(registeredDatanode, createNodeReport(), PipelineReportsProto.newBuilder().build());
}
Also used : NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SCMStorageConfig(org.apache.hadoop.hdds.scm.server.SCMStorageConfig) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) File(java.io.File) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) BeforeClass(org.junit.BeforeClass)
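
A natural follow-up check on this setup is that the datanode registered above is now tracked by the node manager. The sketch below assumes SCMNodeManager exposes a getAllNodes() accessor; that accessor name is an assumption, not taken from the example.

    // Sketch only: getAllNodes() is an assumed accessor on SCMNodeManager.
    @Test
    public void registeredDatanodeIsTracked() {
        assertTrue(nodeManager.getAllNodes().contains(registeredDatanode));
    }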

Example 4 with SCMNodeManager

Use of org.apache.hadoop.hdds.scm.node.SCMNodeManager in the Apache Ozone project.

From class TestReconPipelineManager, method testInitialize():

@Test
public void testInitialize() throws IOException {
    // Get 3 OPEN pipelines from SCM.
    List<Pipeline> pipelinesFromScm = getPipelines(3);
    // Recon has 2 pipelines in ALLOCATED state. (1 is valid and 1 is obsolete)
    // Valid pipeline in Allocated state.
    Pipeline validPipeline = Pipeline.newBuilder()
            .setReplicationConfig(StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE))
            .setId(pipelinesFromScm.get(0).getId())
            .setNodes(pipelinesFromScm.get(0).getNodes())
            .setState(Pipeline.PipelineState.ALLOCATED)
            .build();
    // Invalid pipeline.
    Pipeline invalidPipeline = Pipeline.newBuilder()
            .setReplicationConfig(StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE))
            .setId(PipelineID.randomId())
            .setNodes(Collections.singletonList(randomDatanodeDetails()))
            .setState(Pipeline.PipelineState.CLOSED)
            .build();
    NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
    EventQueue eventQueue = new EventQueue();
    this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
    Mockito.when(versionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    Mockito.when(versionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, SCMContext.emptyContext(), versionManager);
    try (ReconPipelineManager reconPipelineManager =
            ReconPipelineManager.newReconPipelineManager(conf, nodeManager,
                    ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue,
                    scmhaManager, scmContext)) {
        scmContext = new SCMContext.Builder()
                .setIsInSafeMode(true)
                .setLeader(true)
                .setIsPreCheckComplete(true)
                .setSCM(mock(StorageContainerManager.class))
                .build();
        reconPipelineManager.setScmContext(scmContext);
        reconPipelineManager.addPipeline(validPipeline);
        reconPipelineManager.addPipeline(invalidPipeline);
        reconPipelineManager.initializePipelines(pipelinesFromScm);
        List<Pipeline> newReconPipelines = reconPipelineManager.getPipelines();
        // Recon should now have the same number of pipelines as SCM.
        assertEquals(3, newReconPipelines.size());
        // Test if new pipelines from SCM are picked up.
        for (Pipeline pipeline : pipelinesFromScm) {
            assertTrue(reconPipelineManager.containsPipeline(pipeline.getId()));
        }
        // Test if existing pipeline state is updated.
        assertEquals(Pipeline.PipelineState.OPEN, reconPipelineManager.getPipeline(validPipeline.getId()).getPipelineState());
        // Test if obsolete pipelines in Recon are removed.
        assertFalse(reconPipelineManager.containsPipeline(invalidPipeline.getId()));
    }
}
Also used : NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) StorageContainerManager(org.apache.hadoop.hdds.scm.server.StorageContainerManager) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) OMMetadataManagerTestUtils.getRandomPipeline(org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline) Pipeline(org.apache.hadoop.hdds.scm.pipeline.Pipeline) Test(org.junit.Test)
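
The getPipelines(3) helper used at the top of the test is not shown. A plausible reconstruction, assembled only from the Pipeline builder calls that do appear in the test (its name and exact shape are a guess), could look like this:

    // Hypothetical reconstruction of the getPipelines(n) helper used above.
    private List<Pipeline> getPipelines(int count) {
        List<Pipeline> pipelines = new ArrayList<>();
        for (int i = 0; i < count; i++) {
            pipelines.add(Pipeline.newBuilder()
                    .setReplicationConfig(
                            StandaloneReplicationConfig.getInstance(ReplicationFactor.ONE))
                    .setId(PipelineID.randomId())
                    .setNodes(Collections.singletonList(randomDatanodeDetails()))
                    .setState(Pipeline.PipelineState.OPEN)
                    .build());
        }
        return pipelines;
    }

Building the pipelines in the OPEN state matches the later assertion that the valid ALLOCATED pipeline is updated to OPEN after initializePipelines runs.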

Example 5 with SCMNodeManager

Use of org.apache.hadoop.hdds.scm.node.SCMNodeManager in the Apache Ozone project.

From class AbstractReconContainerManagerTest, method setUp():

@Before
public void setUp() throws Exception {
    conf = new OzoneConfiguration();
    conf.set(OZONE_METADATA_DIRS, temporaryFolder.newFolder().getAbsolutePath());
    conf.set(OZONE_SCM_NAMES, "localhost");
    store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
    scmhaManager = MockSCMHAManager.getInstance(true, new MockSCMHADBTransactionBuffer(store));
    sequenceIdGen = new SequenceIdGenerator(conf, scmhaManager, ReconSCMDBDefinition.SEQUENCE_ID.getTable(store));
    scmContext = SCMContext.emptyContext();
    scmStorageConfig = new ReconStorageConfig(conf, new ReconUtils());
    NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
    EventQueue eventQueue = new EventQueue();
    layoutVersionManager = mock(HDDSLayoutVersionManager.class);
    when(layoutVersionManager.getSoftwareLayoutVersion()).thenReturn(maxLayoutVersion());
    when(layoutVersionManager.getMetadataLayoutVersion()).thenReturn(maxLayoutVersion());
    NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap, scmContext, layoutVersionManager);
    pipelineManager = ReconPipelineManager.newReconPipelineManager(conf, nodeManager,
            ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue, scmhaManager,
            scmContext);
    containerManager = new ReconContainerManager(conf, store,
            ReconSCMDBDefinition.CONTAINERS.getTable(store), pipelineManager,
            getScmServiceProvider(), mock(ContainerHealthSchemaManager.class),
            mock(ReconContainerMetadataManager.class), scmhaManager, sequenceIdGen);
}
Also used : ReconUtils(org.apache.hadoop.ozone.recon.ReconUtils) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) HDDSLayoutVersionManager(org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager) MockSCMHADBTransactionBuffer(org.apache.hadoop.hdds.scm.ha.MockSCMHADBTransactionBuffer) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) NetworkTopologyImpl(org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl) NodeManager(org.apache.hadoop.hdds.scm.node.NodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SCMNodeManager(org.apache.hadoop.hdds.scm.node.SCMNodeManager) SequenceIdGenerator(org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator) NetworkTopology(org.apache.hadoop.hdds.scm.net.NetworkTopology) Before(org.junit.Before)
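
Since setUp opens a DB store via DBStoreBuilder, a matching cleanup hook is usually needed so repeated runs do not leak the store. A minimal sketch, assuming only the DB store has to be released (the real base class may close more than this):

    @After
    public void tearDown() throws Exception {
        // Release the store opened in setUp; other resources may also need closing.
        store.close();
    }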

Aggregations

NetworkTopologyImpl (org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl): 7 usages
SCMNodeManager (org.apache.hadoop.hdds.scm.node.SCMNodeManager): 7 usages
EventQueue (org.apache.hadoop.hdds.server.events.EventQueue): 6 usages
HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager): 6 usages
NetworkTopology (org.apache.hadoop.hdds.scm.net.NetworkTopology): 5 usages
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 4 usages
NodeManager (org.apache.hadoop.hdds.scm.node.NodeManager): 4 usages
SCMStorageConfig (org.apache.hadoop.hdds.scm.server.SCMStorageConfig): 3 usages
Test (org.junit.Test): 3 usages
Path (java.nio.file.Path): 2 usages
SequenceIdGenerator (org.apache.hadoop.hdds.scm.ha.SequenceIdGenerator): 2 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 2 usages
EventPublisher (org.apache.hadoop.hdds.server.events.EventPublisher): 2 usages
OMMetadataManagerTestUtils.getRandomPipeline (org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline): 2 usages
Before (org.junit.Before): 2 usages
File (java.io.File): 1 usage
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 1 usage
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 1 usage
HddsProtos (org.apache.hadoop.hdds.protocol.proto.HddsProtos): 1 usage
IncrementalContainerReportProto (org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto): 1 usage