Use of org.apache.hadoop.hdds.scm.node.NodeManager in project ozone by apache.
The class TestReconPipelineManager, method testAddPipeline.
@Test
public void testAddPipeline() throws IOException {
  Pipeline pipeline = getRandomPipeline();

  NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
  EventQueue eventQueue = new EventQueue();
  this.versionManager = Mockito.mock(HDDSLayoutVersionManager.class);
  Mockito.when(versionManager.getMetadataLayoutVersion())
      .thenReturn(maxLayoutVersion());
  Mockito.when(versionManager.getSoftwareLayoutVersion())
      .thenReturn(maxLayoutVersion());
  NodeManager nodeManager = new SCMNodeManager(conf, scmStorageConfig,
      eventQueue, clusterMap, SCMContext.emptyContext(), versionManager);

  ReconPipelineManager reconPipelineManager =
      ReconPipelineManager.newReconPipelineManager(conf, nodeManager,
          ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue,
          scmhaManager, scmContext);

  assertFalse(reconPipelineManager.containsPipeline(pipeline.getId()));
  reconPipelineManager.addPipeline(pipeline);
  assertTrue(reconPipelineManager.containsPipeline(pipeline.getId()));
}
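The getRandomPipeline() helper used above is not part of this excerpt. Below is a minimal sketch of what such a helper could look like; it is an assumption rather than the actual test utility, and the builder calls (Pipeline.newBuilder, PipelineID.randomId, MockDatanodeDetails.randomDatanodeDetails, RatisReplicationConfig.getInstance) may differ between Ozone versions.

// Hypothetical sketch of a getRandomPipeline() helper; not the actual
// test utility. Builder methods vary across Ozone versions (older
// builders use setType/setFactor instead of setReplicationConfig).
private Pipeline getRandomPipeline() {
  List<DatanodeDetails> nodes = Arrays.asList(
      MockDatanodeDetails.randomDatanodeDetails(),
      MockDatanodeDetails.randomDatanodeDetails(),
      MockDatanodeDetails.randomDatanodeDetails());
  return Pipeline.newBuilder()
      .setId(PipelineID.randomId())
      .setReplicationConfig(
          RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE))
      .setState(Pipeline.PipelineState.OPEN)
      .setNodes(nodes)
      .build();
}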
Use of org.apache.hadoop.hdds.scm.node.NodeManager in project ozone by apache.
The class TestStorageContainerManager, method testBlockDeletingThrottling.
@Test
public void testBlockDeletingThrottling() throws Exception {
  int numKeys = 15;
  OzoneConfiguration conf = new OzoneConfiguration();
  conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 1, TimeUnit.SECONDS);
  conf.setInt(ScmConfigKeys.OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 5);
  conf.setTimeDuration(OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
      100, TimeUnit.MILLISECONDS);
  ScmConfig scmConfig = conf.getObject(ScmConfig.class);
  scmConfig.setBlockDeletionInterval(Duration.ofMillis(100));
  conf.setFromObject(scmConfig);
  conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, numKeys);
  conf.setBoolean(HDDS_SCM_SAFEMODE_PIPELINE_CREATION, false);

  MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
      .setHbInterval(1000)
      .setHbProcessorInterval(3000)
      .setNumDatanodes(1)
      .build();
  cluster.waitForClusterToBeReady();
  cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 30000);

  try {
    DeletedBlockLog delLog = cluster.getStorageContainerManager()
        .getScmBlockManager().getDeletedBlockLog();
    Assert.assertEquals(0, delLog.getNumOfValidTransactions());

    // Reset the limit to 1, so that only one TX is handled per datanode
    // per heartbeat.
    int limitSize = 1;
    SCMBlockDeletingService delService = cluster.getStorageContainerManager()
        .getScmBlockManager().getSCMBlockDeletingService();
    delService.setBlockDeleteTXNum(limitSize);

    // Create {numKeys} randomly named keys.
    TestStorageContainerManagerHelper helper =
        new TestStorageContainerManagerHelper(cluster, conf);
    Map<String, OmKeyInfo> keyLocations = helper.createKeys(numKeys, 4096);
    // Wait for the container reports to arrive.
    Thread.sleep(5000);

    for (OmKeyInfo keyInfo : keyLocations.values()) {
      OzoneTestUtils.closeContainers(keyInfo.getKeyLocationVersions(),
          cluster.getStorageContainerManager());
    }
    createDeleteTXLog(cluster.getStorageContainerManager(), delLog,
        keyLocations, helper);

    // Verify that a few TXs were created in the TX log.
    Assert.assertTrue(delLog.getNumOfValidTransactions() > 0);

    // Verify that the size of the delete commands is as expected.
    GenericTestUtils.waitFor(() -> {
      NodeManager nodeManager =
          cluster.getStorageContainerManager().getScmNodeManager();
      LayoutVersionManager versionManager =
          nodeManager.getLayoutVersionManager();
      StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo =
          StorageContainerDatanodeProtocolProtos.LayoutVersionProto.newBuilder()
              .setSoftwareLayoutVersion(versionManager.getSoftwareLayoutVersion())
              .setMetadataLayoutVersion(versionManager.getMetadataLayoutVersion())
              .build();
      List<SCMCommand> commands = nodeManager.processHeartbeat(
          nodeManager.getNodes(NodeStatus.inServiceHealthy()).get(0),
          layoutInfo);
      if (commands != null) {
        for (SCMCommand cmd : commands) {
          if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
            List<DeletedBlocksTransaction> deletedTXs =
                ((DeleteBlocksCommand) cmd).blocksTobeDeleted();
            return deletedTXs != null && deletedTXs.size() == limitSize;
          }
        }
      }
      return false;
    }, 500, 10000);
  } finally {
    cluster.shutdown();
  }
}
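For readability, the throttling check inside the waitFor lambda above can be pulled into a small helper. The sketch below is a suggested refactoring rather than code from the test; it uses only types already referenced in the method.

// Returns the number of deleted-block transactions carried by the first
// deleteBlocksCommand in a heartbeat response, or -1 if no such command
// was issued. Extracted from the polling lambda above for clarity.
private static int deleteTxCountInHeartbeat(List<SCMCommand> commands) {
  if (commands == null) {
    return -1;
  }
  for (SCMCommand cmd : commands) {
    if (cmd.getType() == SCMCommandProto.Type.deleteBlocksCommand) {
      List<DeletedBlocksTransaction> txs =
          ((DeleteBlocksCommand) cmd).blocksTobeDeleted();
      return txs == null ? 0 : txs.size();
    }
  }
  return -1;
}

With that helper, the lambda body reduces to checking deleteTxCountInHeartbeat(commands) == limitSize.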
Use of org.apache.hadoop.hdds.scm.node.NodeManager in project ozone by apache.
The class ContainerPlacementPolicyFactory, method getPolicy.
public static PlacementPolicy getPolicy(ConfigurationSource conf,
    final NodeManager nodeManager, NetworkTopology clusterMap,
    final boolean fallback, SCMContainerPlacementMetrics metrics)
    throws SCMException {
  final Class<? extends PlacementPolicy> placementClass = conf.getClass(
      ScmConfigKeys.OZONE_SCM_CONTAINER_PLACEMENT_IMPL_KEY,
      OZONE_SCM_CONTAINER_PLACEMENT_IMPL_DEFAULT,
      PlacementPolicy.class);
  Constructor<? extends PlacementPolicy> constructor;
  try {
    constructor = placementClass.getDeclaredConstructor(NodeManager.class,
        ConfigurationSource.class, NetworkTopology.class, boolean.class,
        SCMContainerPlacementMetrics.class);
    LOG.info("Create container placement policy of type {}",
        placementClass.getCanonicalName());
  } catch (NoSuchMethodException e) {
    String msg = "Failed to find constructor(NodeManager, ConfigurationSource, "
        + "NetworkTopology, boolean, SCMContainerPlacementMetrics) for class "
        + placementClass.getCanonicalName();
    LOG.error(msg);
    throw new SCMException(msg,
        SCMException.ResultCodes.FAILED_TO_INIT_CONTAINER_PLACEMENT_POLICY);
  }
  try {
    return constructor.newInstance(nodeManager, conf, clusterMap, fallback,
        metrics);
  } catch (Exception e) {
    throw new RuntimeException("Failed to instantiate class "
        + placementClass.getCanonicalName() + " for " + e.getMessage());
  }
}
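A hedged example of how a caller might obtain and use the configured policy is sketched below. The getPolicy arguments mirror the factory signature above; the chooseDatanodes call and its parameter list are assumptions, since the PlacementPolicy interface has changed between Ozone releases, and conf, nodeManager, clusterMap and metrics are assumed to already be available in the calling context.

// Caller sketch; the chooseDatanodes(excluded, favored, nodesRequired,
// sizeRequired) signature is an assumption and may not match every
// Ozone version.
PlacementPolicy policy = ContainerPlacementPolicyFactory.getPolicy(
    conf, nodeManager, clusterMap, true /* fallback */, metrics);
List<DatanodeDetails> targets = policy.chooseDatanodes(
    Collections.emptyList(),  // excluded datanodes
    null,                     // favored datanodes
    3,                        // number of replicas required
    5L * 1024 * 1024 * 1024); // size required, in bytes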