Use of org.apache.hadoop.hdds.scm.server.SCMConfigurator in project ozone by apache.
The class TestKeyManagerImpl, method setUp.
@BeforeClass
public static void setUp() throws Exception {
  conf = new OzoneConfiguration();
  dir = GenericTestUtils.getRandomizedTestDir();
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, dir.toString());
  conf.set(OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, "true");
  mockScmBlockLocationProtocol = mock(ScmBlockLocationProtocol.class);
  nodeManager = new MockNodeManager(true, 10);
  NodeSchema[] schemas = new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
  NodeSchemaManager schemaManager = NodeSchemaManager.getInstance();
  schemaManager.init(schemas, false);
  NetworkTopology clusterMap = new NetworkTopologyImpl(schemaManager);
  // Register every mock datanode in the network topology.
  nodeManager.getAllNodes().stream().forEach(node -> {
    node.setNetworkName(node.getUuidString());
    clusterMap.add(node);
  });
  ((MockNodeManager) nodeManager).setNetworkTopology(clusterMap);
  // Inject the mock node manager, topology, HA manager and context into the SCM.
  SCMConfigurator configurator = new SCMConfigurator();
  configurator.setScmNodeManager(nodeManager);
  configurator.setNetworkTopology(clusterMap);
  configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));
  configurator.setScmContext(SCMContext.emptyContext());
  scm = HddsTestUtils.getScm(conf, configurator);
  scm.start();
  scm.exitSafeMode();
  scmBlockSize = (long) conf.getStorageSize(OZONE_SCM_BLOCK_SIZE,
      OZONE_SCM_BLOCK_SIZE_DEFAULT, StorageUnit.BYTES);
  conf.setLong(OZONE_KEY_PREALLOCATION_BLOCKS_MAX, 10);
  mockScmContainerClient = Mockito.mock(StorageContainerLocationProtocol.class);
  OmTestManagers omTestManagers =
      new OmTestManagers(conf, scm.getBlockProtocolServer(), mockScmContainerClient);
  om = omTestManagers.getOzoneManager();
  metadataManager = omTestManagers.getMetadataManager();
  keyManager = (KeyManagerImpl) omTestManagers.getKeyManager();
  prefixManager = omTestManagers.getPrefixManager();
  writeClient = omTestManagers.getWriteClient();
  mockContainerClient();
  // Any block allocation against the mocked SCM block client fails with a
  // safe-mode exception.
  Mockito.when(mockScmBlockLocationProtocol.allocateBlock(Mockito.anyLong(),
          Mockito.anyInt(), any(ReplicationConfig.class), Mockito.anyString(),
          any(ExcludeList.class)))
      .thenThrow(new SCMException("SafeModePrecheck failed for allocateBlock",
          ResultCodes.SAFE_MODE_EXCEPTION));
  createVolume(VOLUME_NAME);
  createBucket(VOLUME_NAME, BUCKET_NAME, false);
  createBucket(VOLUME_NAME, VERSIONED_BUCKET_NAME, true);
}
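Stripped to the SCMConfigurator calls, the pattern above reduces to the following sketch. This is a hedged illustration rather than code from the project: testDir is a hypothetical directory variable, and it assumes (based on the examples on this page) that any component left unset on the configurator falls back to the default the SCM would build itself.

// Minimal sketch: wire the test doubles used above into SCMConfigurator
// and hand it to the test helper that builds the SCM.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.toString()); // testDir: hypothetical
SCMConfigurator configurator = new SCMConfigurator();
configurator.setScmNodeManager(new MockNodeManager(true, 10));     // mock datanodes
configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));  // mock HA manager
configurator.setScmContext(SCMContext.emptyContext());
StorageContainerManager scm = HddsTestUtils.getScm(conf, configurator);
scm.start();
scm.exitSafeMode();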
Use of org.apache.hadoop.hdds.scm.server.SCMConfigurator in project ozone by apache.
The class HddsTestUtils, method getScm.
/**
 * Constructs and returns a StorageContainerManager instance using the given
 * configuration. The ports used by this StorageContainerManager are
 * randomly selected from the free ports available.
 *
 * @param conf OzoneConfiguration
 * @return StorageContainerManager instance
 * @throws IOException
 * @throws AuthenticationException
 */
public static StorageContainerManager getScm(OzoneConfiguration conf)
    throws IOException, AuthenticationException {
  SCMConfigurator configurator = new SCMConfigurator();
  configurator.setSCMHAManager(MockSCMHAManager.getInstance(true));
  configurator.setScmContext(SCMContext.emptyContext());
  return getScm(conf, configurator);
}
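A hedged usage sketch for this overload follows; the configuration key and the GenericTestUtils helper mirror the setUp methods on this page, while the try/finally shutdown is an assumption about test hygiene rather than code from the project.

// Sketch: build a throwaway SCM with the default mock HA manager and context.
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
    GenericTestUtils.getRandomizedTestDir().toString());
StorageContainerManager scm = HddsTestUtils.getScm(conf);
scm.start();
try {
  scm.exitSafeMode();
  // ... exercise the SCM under test ...
} finally {
  scm.stop(); // stop() assumed as the counterpart of start()
}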
Use of org.apache.hadoop.hdds.scm.server.SCMConfigurator in project ozone by apache.
The class TestBlockManager, method setUp.
@Before
public void setUp() throws Exception {
  conf = SCMTestUtils.getConf();
  numContainerPerOwnerInPipeline = conf.getInt(
      ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT,
      ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT);
  conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, folder.newFolder().toString());
  conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);
  conf.setTimeDuration(HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL, 5,
      TimeUnit.SECONDS);
  // Override the default Node Manager and SCMHAManager in SCM with the mock ones.
  nodeManager = new MockNodeManager(true, 10);
  scmHAManager = MockSCMHAManager.getInstance(true);
  eventQueue = new EventQueue();
  scmContext = SCMContext.emptyContext();
  serviceManager = new SCMServiceManager();
  scmMetadataStore = new SCMMetadataStoreImpl(conf);
  scmMetadataStore.start(conf);
  sequenceIdGen = new SequenceIdGenerator(conf, scmHAManager,
      scmMetadataStore.getSequenceIdTable());
  pipelineManager = PipelineManagerImpl.newPipelineManager(conf, scmHAManager,
      nodeManager, scmMetadataStore.getPipelineTable(), eventQueue, scmContext,
      serviceManager);
  PipelineProvider mockRatisProvider = new MockRatisPipelineProvider(nodeManager,
      pipelineManager.getStateManager(), conf, eventQueue);
  pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
      mockRatisProvider);
  ContainerManager containerManager = new ContainerManagerImpl(conf, scmHAManager,
      sequenceIdGen, pipelineManager, scmMetadataStore.getContainerTable());
  SCMSafeModeManager safeModeManager = new SCMSafeModeManager(conf,
      containerManager.getContainers(), containerManager, pipelineManager,
      eventQueue, serviceManager, scmContext) {
    @Override
    public void emitSafeModeStatus() {
      // skip
    }
  };
  // Inject the manually built managers into the SCM through the configurator.
  SCMConfigurator configurator = new SCMConfigurator();
  configurator.setScmNodeManager(nodeManager);
  configurator.setPipelineManager(pipelineManager);
  configurator.setContainerManager(containerManager);
  configurator.setScmSafeModeManager(safeModeManager);
  configurator.setMetadataStore(scmMetadataStore);
  configurator.setSCMHAManager(scmHAManager);
  configurator.setScmContext(scmContext);
  scm = HddsTestUtils.getScm(conf, configurator);
  // Initialize these fields so that the tests can pass.
  mapping = scm.getContainerManager();
  blockManager = (BlockManagerImpl) scm.getScmBlockManager();
  DatanodeCommandHandler handler = new DatanodeCommandHandler();
  eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, handler);
  CloseContainerEventHandler closeContainerHandler =
      new CloseContainerEventHandler(pipelineManager, mapping, scmContext);
  eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
  replicationConfig = RatisReplicationConfig.getInstance(ReplicationFactor.THREE);
  scm.getScmContext().updateSafeModeStatus(new SafeModeStatus(false, true));
}
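Both tests rely on the injected components being exactly what the constructed SCM exposes afterwards (mapping above is the injected containerManager, retrieved back via scm.getContainerManager()). A small hedged check of that assumption, using only accessors that already appear in the snippets and plain JUnit 4 assertions:

// Sketch: components injected through SCMConfigurator should be the same
// instances the built SCM hands back.
Assert.assertSame(containerManager, scm.getContainerManager());
Assert.assertSame(scmContext, scm.getScmContext());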