use of org.apache.hadoop.hdds.scm.exceptions.SCMException in project ozone by apache.
the class PipelineChoosePolicyFactory method getPolicy.
public static PipelineChoosePolicy getPolicy(ConfigurationSource conf) throws SCMException {
  ScmConfig scmConfig = conf.getObject(ScmConfig.class);
  Class<? extends PipelineChoosePolicy> policyClass = getClass(scmConfig.getPipelineChoosePolicyName(), PipelineChoosePolicy.class);
  try {
    return createPipelineChoosePolicyFromClass(policyClass);
  } catch (Exception e) {
    if (policyClass != OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT) {
      LOG.error("Met an exception while creating pipeline choose policy for the given class "
          + policyClass.getName() + ". Falling back to the default pipeline choose policy "
          + OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT, e);
      return createPipelineChoosePolicyFromClass(OZONE_SCM_PIPELINE_CHOOSE_POLICY_IMPL_DEFAULT);
    }
    throw e;
  }
}
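The helper createPipelineChoosePolicyFromClass is not shown above. A minimal sketch of the reflective instantiation such a helper typically performs, assuming the policy class exposes a no-argument constructor (the assumption and the result code below are illustrative, not confirmed by the snippet):

// Minimal sketch, not the actual Ozone helper: instantiate the policy
// reflectively via an assumed no-argument constructor.
private static PipelineChoosePolicy createPipelineChoosePolicyFromClass(
    Class<? extends PipelineChoosePolicy> policyClass) throws SCMException {
  try {
    return policyClass.getDeclaredConstructor().newInstance();
  } catch (ReflectiveOperationException e) {
    // Result code chosen for illustration only.
    throw new SCMException("Failed to instantiate pipeline choose policy "
        + policyClass.getName(), SCMException.ResultCodes.INTERNAL_ERROR);
  }
}

Combined with the fallback in getPolicy, a misconfigured policy class name degrades to the default policy instead of failing SCM startup.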
use of org.apache.hadoop.hdds.scm.exceptions.SCMException in project ozone by apache.
the class BlockManagerImpl method deleteBlocks.
/**
 * Deletes a list of blocks in an atomic operation. Internally, SCM writes
 * these blocks into a {@link DeletedBlockLog} and deletes them from the SCM
 * DB. If this succeeds, the given blocks enter the pending-deletion state
 * and become invisible in the SCM namespace.
 *
 * @param keyBlocksInfoList the list of BlockGroups, each containing the
 *                          group ID of a key and the BlockIDs associated
 *                          with it.
 * @throws IOException if an exception occurs; none of the blocks is deleted.
 */
@Override
public void deleteBlocks(List<BlockGroup> keyBlocksInfoList) throws IOException {
  if (scm.getScmContext().isInSafeMode()) {
    throw new SCMException("SafeModePrecheck failed for deleteBlocks",
        SCMException.ResultCodes.SAFE_MODE_EXCEPTION);
  }
  Map<Long, List<Long>> containerBlocks = new HashMap<>();
  // TODO: update used space when the block is deleted.
  for (BlockGroup bg : keyBlocksInfoList) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Deleting blocks {}", StringUtils.join(",", bg.getBlockIDList()));
    }
    for (BlockID block : bg.getBlockIDList()) {
      long containerID = block.getContainerID();
      if (containerBlocks.containsKey(containerID)) {
        containerBlocks.get(containerID).add(block.getLocalID());
      } else {
        List<Long> item = new ArrayList<>();
        item.add(block.getLocalID());
        containerBlocks.put(containerID, item);
      }
    }
  }
  try {
    deletedBlockLog.addTransactions(containerBlocks);
  } catch (IOException e) {
    throw new IOException("Skip writing the deleted blocks info to the delLog"
        + " because addTransactions failed. " + keyBlocksInfoList.size()
        + " keys skipped", e);
  }
  // TODO: Container report handling of the deleted blocks:
  // Remove tombstone and update open container usage.
  // We will revisit this when the closed container replication is done.
}
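The containsKey/put branch above can also be written with Map.computeIfAbsent; a behavior-equivalent sketch using only names from the snippet:

// Behavior-equivalent grouping of local block IDs by container ID.
Map<Long, List<Long>> containerBlocks = new HashMap<>();
for (BlockGroup bg : keyBlocksInfoList) {
  for (BlockID block : bg.getBlockIDList()) {
    containerBlocks
        .computeIfAbsent(block.getContainerID(), id -> new ArrayList<>())
        .add(block.getLocalID());
  }
}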
use of org.apache.hadoop.hdds.scm.exceptions.SCMException in project ozone by apache.
the class TestSCMNodeManager method assertPipelineCreationFailsWithNotEnoughNodes.
private void assertPipelineCreationFailsWithNotEnoughNodes(int actualNodeCount) throws Exception {
  try {
    ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(
        HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
    scm.getPipelineManager().createPipeline(ratisThree);
    Assert.fail("3 nodes should not have been found for a pipeline.");
  } catch (SCMException ex) {
    Assert.assertTrue(ex.getMessage().contains("Required 3. Found " + actualNodeCount));
  }
}
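If the project is on JUnit 4.13 or later (an assumption), the try/catch-and-fail idiom above collapses into Assert.assertThrows:

ReplicationConfig ratisThree = ReplicationConfig.fromProtoTypeAndFactor(
    HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
// assertThrows fails the test if no SCMException is thrown and returns
// the exception so its message can be inspected directly.
SCMException ex = Assert.assertThrows(SCMException.class,
    () -> scm.getPipelineManager().createPipeline(ratisThree));
Assert.assertTrue(ex.getMessage().contains("Required 3. Found " + actualNodeCount));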
use of org.apache.hadoop.hdds.scm.exceptions.SCMException in project ozone by apache.
the class TestPipelineDatanodesIntersection method testPipelineDatanodesIntersection.
@Test
public void testPipelineDatanodesIntersection() throws IOException {
  NodeManager nodeManager = new MockNodeManager(true, nodeCount);
  conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
  conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
  SCMHAManager scmhaManager = MockSCMHAManager.getInstance(true);
  PipelineStateManager stateManager = PipelineStateManagerImpl.newBuilder()
      .setPipelineStore(SCMDBDefinition.PIPELINES.getTable(dbStore))
      .setRatisServer(scmhaManager.getRatisServer())
      .setNodeManager(nodeManager)
      .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
      .build();
  PipelineProvider provider = new MockRatisPipelineProvider(nodeManager, stateManager, conf);
  int healthyNodeCount = nodeManager.getNodeCount(NodeStatus.inServiceHealthy());
  int intersectionCount = 0;
  int createdPipelineCount = 0;
  while (!end && createdPipelineCount <= healthyNodeCount * nodeHeaviness) {
    try {
      Pipeline pipeline = provider.create(RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
      HddsProtos.Pipeline pipelineProto = pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION);
      stateManager.addPipeline(pipelineProto);
      nodeManager.addPipeline(pipeline);
      List<Pipeline> overlapPipelines = RatisPipelineUtils.checkPipelineContainSameDatanodes(stateManager, pipeline);
      if (!overlapPipelines.isEmpty()) {
        intersectionCount++;
        for (Pipeline overlapPipeline : overlapPipelines) {
          LOG.info("This pipeline: " + pipeline.getId().toString()
              + " overlaps with previous pipeline: " + overlapPipeline.getId()
              + ". They share the same set of datanodes: "
              + pipeline.getNodesInOrder().get(0).getUuid() + "/"
              + pipeline.getNodesInOrder().get(1).getUuid() + "/"
              + pipeline.getNodesInOrder().get(2).getUuid() + " and "
              + overlapPipeline.getNodesInOrder().get(0).getUuid() + "/"
              + overlapPipeline.getNodesInOrder().get(1).getUuid() + "/"
              + overlapPipeline.getNodesInOrder().get(2).getUuid() + ".");
        }
      }
      createdPipelineCount++;
    } catch (SCMException e) {
      end = true;
    } catch (IOException e) {
      end = true;
      // Should not throw a regular IOException.
      Assert.fail();
    }
  }
  end = false;
  LOG.info("Among total "
      + stateManager.getPipelines(RatisReplicationConfig.getInstance(ReplicationFactor.THREE)).size()
      + " created pipelines with " + healthyNodeCount + " healthy datanodes and "
      + nodeHeaviness + " as node heaviness, "
      + intersectionCount + " pipelines have the same set of datanodes.");
}
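checkPipelineContainSameDatanodes is used above as a black box. A minimal illustrative sketch of such a check over an in-memory pipeline collection (not the actual RatisPipelineUtils implementation; the helper name and the Pipeline.getNodes() accessor are assumptions):

// Illustrative only: find existing pipelines whose datanode set equals
// the candidate pipeline's, comparing by datanode UUID.
static List<Pipeline> pipelinesWithSameDatanodes(
    Collection<Pipeline> existing, Pipeline candidate) {
  Set<UUID> candidateDns = candidate.getNodes().stream()
      .map(DatanodeDetails::getUuid)
      .collect(Collectors.toSet());
  return existing.stream()
      .filter(p -> !p.getId().equals(candidate.getId()))
      .filter(p -> candidateDns.equals(p.getNodes().stream()
          .map(DatanodeDetails::getUuid)
          .collect(Collectors.toSet())))
      .collect(Collectors.toList());
}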
use of org.apache.hadoop.hdds.scm.exceptions.SCMException in project ozone by apache.
the class TestPipelinePlacementPolicy method insertHeavyNodesIntoNodeManager.
private void insertHeavyNodesIntoNodeManager(List<DatanodeDetails> nodes, int heavyNodeCount) throws IOException {
  if (nodes == null) {
    throw new SCMException("", SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
  }
  int considerHeavyCount = conf.getInt(
      ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
      ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) + 1;
  for (DatanodeDetails node : nodes) {
    if (heavyNodeCount > 0) {
      int pipelineCount = 0;
      List<DatanodeDetails> dnList = new ArrayList<>();
      dnList.add(node);
      dnList.add(MockDatanodeDetails.randomDatanodeDetails());
      dnList.add(MockDatanodeDetails.randomDatanodeDetails());
      Pipeline pipeline;
      HddsProtos.Pipeline pipelineProto;
      while (pipelineCount < considerHeavyCount) {
        pipeline = Pipeline.newBuilder()
            .setId(PipelineID.randomId())
            .setState(Pipeline.PipelineState.OPEN)
            .setReplicationConfig(ReplicationConfig.fromProtoTypeAndFactor(RATIS, THREE))
            .setNodes(dnList)
            .build();
        pipelineProto = pipeline.getProtobufMessage(ClientVersions.CURRENT_VERSION);
        nodeManager.addPipeline(pipeline);
        stateManager.addPipeline(pipelineProto);
        pipelineCount++;
      }
      heavyNodeCount--;
    }
  }
}
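SCMException carries a typed result code, so callers can branch on the failure class instead of parsing messages; a small sketch (the getResult() accessor for the stored ResultCodes value is our assumption):

try {
  insertHeavyNodesIntoNodeManager(nodes, heavyNodeCount);
} catch (SCMException e) {
  // Branch on the typed result code rather than the message text.
  if (e.getResult() == SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE) {
    LOG.warn("No suitable node available", e);
  } else {
    throw e;
  }
}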