Example usage of org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException in the Apache Ozone project: class TestReconAsPassiveScm, method testDatanodeRegistrationAndReports.
/**
 * Verifies that Recon, acting as a passive SCM, mirrors the active SCM:
 * it syncs SCM's pipelines, registers the same datanodes, picks up newly
 * created containers, refuses pipeline creation, and ignores SCM-only
 * commands such as close-container.
 *
 * @throws Exception on any cluster or assertion failure
 */
@Test
public void testDatanodeRegistrationAndReports() throws Exception {
  ReconStorageContainerManagerFacade reconScm =
      (ReconStorageContainerManagerFacade) cluster.getReconServer()
          .getReconStorageContainerManager();
  StorageContainerManager scm = cluster.getStorageContainerManager();
  PipelineManager reconPipelineManager = reconScm.getPipelineManager();
  PipelineManager scmPipelineManager = scm.getPipelineManager();

  // Wait (up to 60s, polling every 5s) until Recon has synced at least
  // 4 pipelines from SCM.
  LambdaTestUtils.await(60000, 5000,
      () -> (reconPipelineManager.getPipelines().size() >= 4));

  // Verify if Recon has all the pipelines from SCM.
  scmPipelineManager.getPipelines().forEach(p -> {
    try {
      assertNotNull(reconPipelineManager.getPipeline(p.getId()));
    } catch (PipelineNotFoundException e) {
      // Fail with context so the missing pipeline is identifiable in logs.
      Assert.fail("Pipeline " + p.getId() + " not found in Recon");
    }
  });

  // Verify we can never create a pipeline in Recon.
  LambdaTestUtils.intercept(UnsupportedOperationException.class,
      "Trying to create pipeline in Recon, which is prohibited!",
      () -> reconPipelineManager.createPipeline(
          RatisReplicationConfig.getInstance(ONE)));

  ContainerManager scmContainerManager = scm.getContainerManager();
  assertTrue(scmContainerManager.getContainers().isEmpty());

  // Verify all nodes registered with SCM are also registered with Recon.
  NodeManager reconNodeManager = reconScm.getScmNodeManager();
  NodeManager scmNodeManager = scm.getScmNodeManager();
  assertEquals(scmNodeManager.getAllNodes().size(),
      reconNodeManager.getAllNodes().size());

  // Create a container via SCM and write to it through a datanode.
  ContainerManager reconContainerManager = reconScm.getContainerManager();
  ContainerInfo containerInfo = scmContainerManager.allocateContainer(
      RatisReplicationConfig.getInstance(ONE), "test");
  long containerID = containerInfo.getContainerID();
  Pipeline pipeline =
      scmPipelineManager.getPipeline(containerInfo.getPipelineID());
  XceiverClientGrpc client = new XceiverClientGrpc(pipeline, conf);
  runTestOzoneContainerViaDataNode(containerID, client);

  // Verify Recon picked up the new container that was created.
  assertEquals(scmContainerManager.getContainerIDs(),
      reconContainerManager.getContainerIDs());

  // Recon is passive: firing a close-container event at it must only
  // produce a DEBUG log saying the command is ignored.
  GenericTestUtils.LogCapturer logCapturer =
      GenericTestUtils.LogCapturer.captureLogs(ReconNodeManager.LOG);
  GenericTestUtils.setLogLevel(ReconNodeManager.LOG, Level.DEBUG);
  reconScm.getEventQueue().fireEvent(CLOSE_CONTAINER,
      containerInfo.containerID());
  GenericTestUtils.waitFor(() -> logCapturer.getOutput()
          .contains("Ignoring unsupported command closeContainerCommand"),
      1000, 20000);
}
Example usage of org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException in the Apache Ozone project: class SCMNodeManager, method getPeerList.
/**
 * Returns the datanodes that share at least one pipeline with the given
 * datanode. The given datanode itself is excluded from the result.
 *
 * @param dn datanode whose peers are requested; must not be null
 * @return set of peer datanodes (possibly empty, never null)
 */
@Override
public Collection<DatanodeDetails> getPeerList(DatanodeDetails dn) {
  Preconditions.checkNotNull(dn);
  HashSet<DatanodeDetails> dns = new HashSet<>();
  Set<PipelineID> pipelines =
      nodeStateManager.getPipelineByDnID(dn.getUuid());
  PipelineManager pipelineManager = scmContext.getScm().getPipelineManager();
  // No isEmpty() guard needed: iterating an empty set is a no-op.
  for (PipelineID id : pipelines) {
    try {
      Pipeline pipeline = pipelineManager.getPipeline(id);
      dns.addAll(pipeline.getNodes());
    } catch (PipelineNotFoundException pnfe) {
      // The pipeline may have been closed/removed concurrently; skip it.
    }
  }
  // Remove self node from the set.
  dns.remove(dn);
  return dns;
}
Example usage of org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException in the Apache Ozone project: class ContainerStateManagerImpl, method addContainer.
/**
 * Adds the given container to the in-memory container map and persists it
 * through the transaction buffer, attaching it to its pipeline when the
 * pipeline exists. For an OPEN container a missing pipeline is an error
 * (PipelineNotFoundException); any failure triggers a rollback of both the
 * map entry and the buffered write.
 *
 * @param containerInfo protobuf description of the container to add
 * @throws IOException if persisting or pipeline attachment fails
 */
@Override
public void addContainer(final ContainerInfoProto containerInfo) throws IOException {
// Change the exception thrown to PipelineNotFound and
// ClosedPipelineException once ClosedPipelineException is introduced
// in PipelineManager.
Preconditions.checkNotNull(containerInfo);
final ContainerInfo container = ContainerInfo.fromProtobuf(containerInfo);
final ContainerID containerID = container.containerID();
final PipelineID pipelineID = container.getPipelineID();
lock.writeLock().lock();
try {
// Idempotent: do nothing if the container is already tracked.
if (!containers.contains(containerID)) {
// Order matters: buffer the persistent write first, then mutate the
// in-memory map, then attach to the pipeline.
ExecutionUtil.create(() -> {
transactionBuffer.addToBuffer(containerStore, containerID, container);
containers.addContainer(container);
if (pipelineManager.containsPipeline(pipelineID)) {
pipelineManager.addContainerToPipeline(pipelineID, containerID);
} else if (containerInfo.getState().equals(HddsProtos.LifeCycleState.OPEN)) {
// Pipeline should exist, but not
throw new PipelineNotFoundException();
}
// recon may receive report of closed container,
// no corresponding Pipeline can be synced for scm.
// just only add the container.
}).onException(() -> {
// Compensating rollback: undo the map entry and the buffered write.
containers.removeContainer(containerID);
transactionBuffer.removeFromBuffer(containerStore, containerID);
}).execute();
}
} finally {
lock.writeLock().unlock();
}
}
Example usage of org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException in the Apache Ozone project: class MinLeaderCountChoosePolicy, method getSuggestedLeaderCount.
/**
 * Computes, for each candidate datanode, the number of non-closed pipelines
 * for which it is already the suggested leader.
 *
 * @param dns candidate datanodes
 * @param nodeManager source of the datanode-to-pipelines mapping
 * @param pipelineStateManager used to resolve pipeline details
 * @return map containing every candidate datanode, with its current
 *         suggested-leader count (0 if it leads no pipelines)
 */
private Map<DatanodeDetails, Integer> getSuggestedLeaderCount(List<DatanodeDetails> dns, NodeManager nodeManager, PipelineStateManager pipelineStateManager) {
  Map<DatanodeDetails, Integer> suggestedLeaderCount = new HashMap<>();
  for (DatanodeDetails dn : dns) {
    // Seed with 0 so every candidate appears in the result.
    suggestedLeaderCount.put(dn, 0);
    for (PipelineID pipelineID : nodeManager.getPipelines(dn)) {
      try {
        Pipeline pipeline = pipelineStateManager.getPipeline(pipelineID);
        if (!pipeline.isClosed()
            && dn.getUuid().equals(pipeline.getSuggestedLeaderId())) {
          // merge() replaces the get-then-put increment idiom.
          suggestedLeaderCount.merge(dn, 1, Integer::sum);
        }
      } catch (PipelineNotFoundException e) {
        // Pipeline may have been removed concurrently; skip it.
        LOG.debug("Pipeline not found in pipeline state manager : {}", pipelineID, e);
      }
    }
  }
  return suggestedLeaderCount;
}
Example usage of org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException in the Apache Ozone project: class NodeEndpoint, method getDatanodes.
/**
 * Return the list of datanodes with detailed information about each datanode.
 * @return {@link Response}
 */
@GET
public Response getDatanodes() {
  List<DatanodeMetadata> datanodes = new ArrayList<>();
  for (DatanodeDetails datanode : nodeManager.getAllNodes()) {
    DatanodeStorageReport storageReport = getStorageReport(datanode);

    // Health state may be unavailable if the node vanished between calls.
    NodeState nodeState = null;
    try {
      nodeState = nodeManager.getNodeStatus(datanode).getHealth();
    } catch (NodeNotFoundException e) {
      LOG.warn("Cannot get nodeState for datanode {}", datanode, e);
    }

    NodeOperationalState nodeOpState = datanode.getPersistedOpState();
    String hostname = datanode.getHostName();
    DatanodeMetadata.Builder builder = DatanodeMetadata.newBuilder();

    // Collect per-pipeline details plus leader and open-container tallies.
    List<DatanodePipeline> pipelines = new ArrayList<>();
    int leaderCount = 0;
    int openContainers = 0;
    for (PipelineID pipelineID : nodeManager.getPipelines(datanode)) {
      try {
        Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
        String leaderNode = pipeline.getLeaderNode().getHostName();
        pipelines.add(new DatanodePipeline(
            pipelineID.getId(),
            pipeline.getReplicationConfig().getReplicationType().toString(),
            ReplicationConfig.getLegacyFactor(
                pipeline.getReplicationConfig()).getNumber(),
            leaderNode));
        if (datanode.getUuid().equals(pipeline.getLeaderId())) {
          leaderCount++;
        }
        openContainers += reconContainerManager
            .getPipelineToOpenContainer().getOrDefault(pipelineID, 0);
      } catch (PipelineNotFoundException ex) {
        LOG.warn("Cannot get pipeline {} for datanode {}, pipeline not found", pipelineID.getId(), hostname, ex);
      } catch (IOException ioEx) {
        LOG.warn("Cannot get leader node of pipeline with id {}.", pipelineID.getId(), ioEx);
      }
    }

    try {
      Set<ContainerID> allContainers = nodeManager.getContainers(datanode);
      builder.withContainers(allContainers.size());
      builder.withOpenContainers(openContainers);
    } catch (NodeNotFoundException ex) {
      LOG.warn("Cannot get containers, datanode {} not found.", datanode.getUuid(), ex);
    }

    DatanodeInfo dnInfo = (DatanodeInfo) datanode;
    datanodes.add(builder
        .withHostname(nodeManager.getHostName(datanode))
        .withDatanodeStorageReport(storageReport)
        .withLastHeartbeat(nodeManager.getLastHeartbeat(datanode))
        .withState(nodeState)
        .withOperationalState(nodeOpState)
        .withPipelines(pipelines)
        .withLeaderCount(leaderCount)
        .withUUid(datanode.getUuidString())
        .withVersion(nodeManager.getVersion(datanode))
        .withSetupTime(nodeManager.getSetupTime(datanode))
        .withRevision(nodeManager.getRevision(datanode))
        .withBuildDate(nodeManager.getBuildDate(datanode))
        .withLayoutVersion(
            dnInfo.getLastKnownLayoutVersion().getMetadataLayoutVersion())
        .build());
  }
  DatanodesResponse datanodesResponse =
      new DatanodesResponse(datanodes.size(), datanodes);
  return Response.ok(datanodesResponse).build();
}
Aggregations