Usage of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode in the Apache Ozone project.
Snippet: class TestReconIncrementalContainerReportHandler, method testProcessICRStateMismatch.
@Test
public void testProcessICRStateMismatch() throws IOException {
  // Recon keeps the container in OPEN state while the reported replica is in
  // some non-OPEN state; the handler must reconcile the container state.
  long containerId = 11;
  for (State replicaState
      : Arrays.asList(State.CLOSING, State.QUASI_CLOSED, State.CLOSED)) {
    ContainerWithPipeline cwp = getTestContainer(containerId++, OPEN);
    ContainerID id = cwp.getContainerInfo().containerID();
    ReconContainerManager manager = getContainerManager();
    manager.addNewContainer(cwp);

    // Mock a node manager that resolves any UUID to the pipeline's first node.
    DatanodeDetails datanode = cwp.getPipeline().getFirstNode();
    NodeManager mockNodeManager = mock(NodeManager.class);
    when(mockNodeManager.getNodeByUuid(any())).thenReturn(datanode);

    // Build an ICR carrying the mismatching replica state.
    IncrementalContainerReportFromDatanode mockReport =
        mock(IncrementalContainerReportFromDatanode.class);
    when(mockReport.getDatanodeDetails())
        .thenReturn(cwp.getPipeline().getFirstNode());
    IncrementalContainerReportProto reportProto =
        getIncrementalContainerReportProto(id, replicaState,
            datanode.getUuidString());
    when(mockReport.getReport()).thenReturn(reportProto);

    ReconIncrementalContainerReportHandler handler =
        new ReconIncrementalContainerReportHandler(
            mockNodeManager, manager, SCMContext.emptyContext());
    handler.onMessage(mockReport, mock(EventPublisher.class));

    // The container survives, gains the replica, and its state follows the
    // replica state mapping.
    assertTrue(manager.containerExist(id));
    assertEquals(1, manager.getContainerReplicas(id).size());
    LifeCycleState expectedState =
        getContainerStateFromReplicaState(replicaState);
    LifeCycleState actualState = manager.getContainer(id).getState();
    assertEquals(String.format("Expecting %s in "
        + "container state for replica state %s", expectedState, replicaState),
        expectedState, actualState);
  }
}
Usage of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode in the Apache Ozone project.
Snippet: class TestIncrementalContainerReportHandler, method testICRFCRRace.
@Test
// HDDS-5249: runs a full container report (FCR) and an incremental container
// report (ICR) for the same datanode concurrently. With the race condition
// present this test failed consistently; it passes once the race is fixed.
public void testICRFCRRace() throws IOException, NodeNotFoundException,
    ExecutionException, InterruptedException {
  final IncrementalContainerReportHandler reportHandler =
      new IncrementalContainerReportHandler(
          nodeManager, containerManager, scmContext);
  final ContainerReportHandler fullReportHandler =
      new ContainerReportHandler(nodeManager, containerManager);

  final ContainerInfo container = getContainer(LifeCycleState.CLOSED);
  final ContainerInfo containerTwo = getContainer(LifeCycleState.CLOSED);
  final DatanodeDetails datanode = randomDatanodeDetails();
  nodeManager.register(datanode, null, null);
  containerStateManager.addContainer(container.getProtobuf());
  containerStateManager.addContainer(containerTwo.getProtobuf());
  Assert.assertEquals(0, nodeManager.getContainers(datanode).size());

  // The ICR reports "container", the FCR reports "containerTwo"; both claim
  // to come from the same datanode.
  final IncrementalContainerReportProto containerReport =
      getIncrementalContainerReportProto(container.containerID(), CLOSED,
          datanode.getUuidString());
  final IncrementalContainerReportFromDatanode icr =
      new IncrementalContainerReportFromDatanode(datanode, containerReport);
  final ContainerReportsProto fullReport = TestContainerReportHandler
      .getContainerReportsProto(containerTwo.containerID(), CLOSED,
          datanode.getUuidString());
  final ContainerReportFromDatanode fcr =
      new ContainerReportFromDatanode(datanode, fullReport);

  // We need to run the FCR and ICR at the same time via the executor so we
  // can try to simulate the race condition.
  ThreadPoolExecutor executor =
      (ThreadPoolExecutor) Executors.newFixedThreadPool(2);
  try {
    // Ten iterations: enough that the unfixed race reproduced every run.
    for (int i = 0; i < 10; i++) {
      Future<?> t1 =
          executor.submit(() -> fullReportHandler.onMessage(fcr, publisher));
      Future<?> t2 =
          executor.submit(() -> reportHandler.onMessage(icr, publisher));
      t1.get();
      t2.get();

      Set<ContainerID> nmContainers = nodeManager.getContainers(datanode);
      if (nmContainers.contains(container.containerID())) {
        // If we find "container" in the NM, then we must also have it in
        // Container Manager.
        Assert.assertEquals(1, containerStateManager
            .getContainerReplicas(container.containerID()).size());
        Assert.assertEquals(2, nmContainers.size());
      } else {
        // If the race condition occurs as mentioned in HDDS-5249, then this
        // assert should fail. We will have found nothing for "container" in
        // NM, but have found something for it in ContainerManager, and that
        // should not happen. It should be in both, or neither.
        Assert.assertEquals(0, containerStateManager
            .getContainerReplicas(container.containerID()).size());
        Assert.assertEquals(1, nmContainers.size());
      }
      // "containerTwo" (from the FCR) must always be present.
      Assert.assertEquals(1, containerStateManager
          .getContainerReplicas(containerTwo.containerID()).size());
    }
  } finally {
    executor.shutdown();
  }
}
Usage of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode in the Apache Ozone project.
Snippet: class TestIncrementalContainerReportHandler, method testClosingToQuasiClosed.
@Test
public void testClosingToQuasiClosed() throws IOException {
  // A CLOSING container whose replica reports QUASI_CLOSED should transition
  // the container itself to QUASI_CLOSED.
  final IncrementalContainerReportHandler handler =
      new IncrementalContainerReportHandler(
          nodeManager, containerManager, scmContext);
  final ContainerInfo container = getContainer(LifeCycleState.CLOSING);

  final DatanodeDetails dnOne = randomDatanodeDetails();
  final DatanodeDetails dnTwo = randomDatanodeDetails();
  final DatanodeDetails dnThree = randomDatanodeDetails();
  nodeManager.register(dnOne, null, null);
  nodeManager.register(dnTwo, null, null);
  nodeManager.register(dnThree, null, null);

  // Seed three CLOSING replicas, one per datanode.
  final Set<ContainerReplica> replicas = getReplicas(container.containerID(),
      ContainerReplicaProto.State.CLOSING, dnOne, dnTwo, dnThree);
  containerStateManager.addContainer(container.getProtobuf());
  for (ContainerReplica replica : replicas) {
    containerStateManager.updateContainerReplica(
        container.containerID(), replica);
  }

  // Datanode one reports its replica as QUASI_CLOSED.
  final IncrementalContainerReportProto report =
      getIncrementalContainerReportProto(container.containerID(),
          ContainerReplicaProto.State.QUASI_CLOSED, dnOne.getUuidString());
  handler.onMessage(
      new IncrementalContainerReportFromDatanode(dnOne, report), publisher);

  Assert.assertEquals(LifeCycleState.QUASI_CLOSED,
      containerManager.getContainer(container.containerID()).getState());
}
Usage of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode in the Apache Ozone project.
Snippet: class TestIncrementalContainerReportHandler, method testQuasiClosedToClosed.
@Test
public void testQuasiClosedToClosed() throws IOException {
  // A QUASI_CLOSED container should move to CLOSED when the quasi-closed
  // replica (on datanodeThree) reports itself CLOSED.
  final IncrementalContainerReportHandler reportHandler =
      new IncrementalContainerReportHandler(
          nodeManager, containerManager, scmContext);
  final ContainerInfo container = getContainer(LifeCycleState.QUASI_CLOSED);

  final DatanodeDetails datanodeOne = randomDatanodeDetails();
  final DatanodeDetails datanodeTwo = randomDatanodeDetails();
  final DatanodeDetails datanodeThree = randomDatanodeDetails();
  nodeManager.register(datanodeOne, null, null);
  nodeManager.register(datanodeTwo, null, null);
  nodeManager.register(datanodeThree, null, null);

  // Replicas: CLOSING on datanodes one and two, QUASI_CLOSED on three.
  final Set<ContainerReplica> containerReplicas = getReplicas(
      container.containerID(), ContainerReplicaProto.State.CLOSING,
      datanodeOne, datanodeTwo);
  containerReplicas.addAll(getReplicas(container.containerID(),
      ContainerReplicaProto.State.QUASI_CLOSED, datanodeThree));
  containerStateManager.addContainer(container.getProtobuf());
  containerReplicas.forEach(r -> containerStateManager
      .updateContainerReplica(container.containerID(), r));

  final IncrementalContainerReportProto containerReport =
      getIncrementalContainerReportProto(container.containerID(), CLOSED,
          datanodeThree.getUuidString());
  // The wrapper's datanode must match the report's origin (datanodeThree);
  // the original passed datanodeOne here, contradicting the report's UUID.
  final IncrementalContainerReportFromDatanode icr =
      new IncrementalContainerReportFromDatanode(datanodeThree,
          containerReport);
  reportHandler.onMessage(icr, publisher);

  Assert.assertEquals(LifeCycleState.CLOSED,
      containerManager.getContainer(container.containerID()).getState());
}
Usage of org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode in the Apache Ozone project.
Snippet: class StorageContainerManager, method initializeEventHandlers.
// Wires up every SCM event handler and registers it on the event queue.
// Handlers are constructed first, then registered; container reports (FCR and
// ICR) get a shared affinity executor so reports from the same datanode are
// serialized onto one thread.
private void initializeEventHandlers() {
// Handler construction. Dependencies (pipelineManager, containerManager,
// scmNodeManager, etc.) are fields of the enclosing StorageContainerManager.
CloseContainerEventHandler closeContainerHandler = new CloseContainerEventHandler(pipelineManager, containerManager, scmContext);
NodeReportHandler nodeReportHandler = new NodeReportHandler(scmNodeManager);
PipelineReportHandler pipelineReportHandler = new PipelineReportHandler(scmSafeModeManager, pipelineManager, scmContext, configuration);
CommandStatusReportHandler cmdStatusReportHandler = new CommandStatusReportHandler();
NewNodeHandler newNodeHandler = new NewNodeHandler(pipelineManager, scmDecommissionManager, configuration, serviceManager);
StaleNodeHandler staleNodeHandler = new StaleNodeHandler(scmNodeManager, pipelineManager, configuration);
DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager, pipelineManager, containerManager);
StartDatanodeAdminHandler datanodeStartAdminHandler = new StartDatanodeAdminHandler(scmNodeManager, pipelineManager);
ReadOnlyHealthyToHealthyNodeHandler readOnlyHealthyToHealthyNodeHandler = new ReadOnlyHealthyToHealthyNodeHandler(configuration, serviceManager);
HealthyReadOnlyNodeHandler healthyReadOnlyNodeHandler = new HealthyReadOnlyNodeHandler(scmNodeManager, pipelineManager, configuration);
ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
ContainerReportHandler containerReportHandler = new ContainerReportHandler(scmNodeManager, containerManager, scmContext, configuration);
IncrementalContainerReportHandler incrementalContainerReportHandler = new IncrementalContainerReportHandler(scmNodeManager, containerManager, scmContext);
PipelineActionHandler pipelineActionHandler = new PipelineActionHandler(pipelineManager, scmContext, configuration);
CRLStatusReportHandler crlStatusReportHandler = new CRLStatusReportHandler(certificateStore, configuration);
eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, scmNodeManager);
eventQueue.addHandler(SCMEvents.RETRIABLE_DATANODE_COMMAND, scmNodeManager);
eventQueue.addHandler(SCMEvents.NODE_REPORT, nodeReportHandler);
// Use the same executor for both ICR and FCR.
// The Executor maps the event to a thread for DN.
// Dispatcher should always dispatch FCR first followed by ICR
List<ThreadPoolExecutor> executors = FixedThreadPoolWithAffinityExecutor.initializeExecutorPool(SCMEvents.CONTAINER_REPORT.getName() + "_OR_" + SCMEvents.INCREMENTAL_CONTAINER_REPORT.getName());
EventExecutor<ContainerReportFromDatanode> containerReportExecutors = new FixedThreadPoolWithAffinityExecutor<>(EventQueue.getExecutorName(SCMEvents.CONTAINER_REPORT, containerReportHandler), executors);
EventExecutor<IncrementalContainerReportFromDatanode> incrementalReportExecutors = new FixedThreadPoolWithAffinityExecutor<>(EventQueue.getExecutorName(SCMEvents.INCREMENTAL_CONTAINER_REPORT, incrementalContainerReportHandler), executors);
eventQueue.addHandler(SCMEvents.CONTAINER_REPORT, containerReportExecutors, containerReportHandler);
eventQueue.addHandler(SCMEvents.INCREMENTAL_CONTAINER_REPORT, incrementalReportExecutors, incrementalContainerReportHandler);
// Remaining handlers use the queue's default executor.
eventQueue.addHandler(SCMEvents.CONTAINER_ACTIONS, actionsHandler);
eventQueue.addHandler(SCMEvents.CLOSE_CONTAINER, closeContainerHandler);
eventQueue.addHandler(SCMEvents.NEW_NODE, newNodeHandler);
eventQueue.addHandler(SCMEvents.STALE_NODE, staleNodeHandler);
eventQueue.addHandler(SCMEvents.HEALTHY_READONLY_TO_HEALTHY_NODE, readOnlyHealthyToHealthyNodeHandler);
eventQueue.addHandler(SCMEvents.HEALTHY_READONLY_NODE, healthyReadOnlyNodeHandler);
eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
eventQueue.addHandler(SCMEvents.START_ADMIN_ON_NODE, datanodeStartAdminHandler);
eventQueue.addHandler(SCMEvents.CMD_STATUS_REPORT, cmdStatusReportHandler);
// NOTE(review): the cast assumes the block manager's deleted-block log is a
// DeletedBlockLogImpl — confirm against scmBlockManager's construction.
eventQueue.addHandler(SCMEvents.DELETE_BLOCK_STATUS, (DeletedBlockLogImpl) scmBlockManager.getDeletedBlockLog());
eventQueue.addHandler(SCMEvents.PIPELINE_ACTIONS, pipelineActionHandler);
eventQueue.addHandler(SCMEvents.PIPELINE_REPORT, pipelineReportHandler);
eventQueue.addHandler(SCMEvents.CRL_STATUS_REPORT, crlStatusReportHandler);
}
End of aggregated usage examples.