Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project ozone by apache: class AbstractReconContainerManagerTest, method getTestContainer.
protected ContainerWithPipeline getTestContainer(LifeCycleState state)
    throws IOException {
  ContainerID containerID = ContainerID.valueOf(100L);
  Pipeline pipeline = getRandomPipeline();
  pipelineManager.addPipeline(pipeline);
  ContainerInfo containerInfo = new ContainerInfo.Builder()
      .setContainerID(containerID.getId())
      .setNumberOfKeys(10)
      .setPipelineID(pipeline.getId())
      .setReplicationConfig(StandaloneReplicationConfig.getInstance(ONE))
      .setOwner("test")
      .setState(state)
      .build();
  return new ContainerWithPipeline(containerInfo, pipeline);
}
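A short usage sketch of this helper, assuming it is called from a test in a subclass of AbstractReconContainerManagerTest. Only getTestContainer, getContainerManager, and addNewContainer appear in the snippets on this page; the test name and the getContainers() assertion are illustrative assumptions.

@Test
public void testAddNewOpenContainer() throws IOException {
  // Build an OPEN container attached to a fresh random pipeline via the helper above.
  ContainerWithPipeline containerWithPipeline = getTestContainer(LifeCycleState.OPEN);
  ReconContainerManager containerManager = getContainerManager();
  // Register the container with Recon's container manager.
  containerManager.addNewContainer(containerWithPipeline);
  // Recon should now track exactly this one container (assumes getContainers()
  // from the standard ContainerManager interface).
  Assert.assertEquals(1, containerManager.getContainers().size());
}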
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project ozone by apache: class TestReconContainerManager, method testUpdateAndRemoveContainerReplica.
@Test
public void testUpdateAndRemoveContainerReplica() throws IOException {
  // Sanity checking updateContainerReplica and ContainerReplicaHistory

  // Init Container 1
  final long cIDlong1 = 1L;
  final ContainerID containerID1 = ContainerID.valueOf(cIDlong1);

  // Init DN01
  final UUID uuid1 = UUID.randomUUID();
  final DatanodeDetails datanodeDetails1 = DatanodeDetails.newBuilder()
      .setUuid(uuid1)
      .setHostName("host1")
      .setIpAddress("127.0.0.1")
      .build();
  ContainerReplica containerReplica1 = ContainerReplica.newBuilder()
      .setContainerID(containerID1)
      .setContainerState(State.OPEN)
      .setDatanodeDetails(datanodeDetails1)
      .setSequenceId(1001L)
      .build();

  final ReconContainerManager containerManager = getContainerManager();
  final Map<Long, Map<UUID, ContainerReplicaHistory>> repHistMap =
      containerManager.getReplicaHistoryMap();
  // Should be empty at the beginning
  Assert.assertEquals(0, repHistMap.size());

  // Put a replica info and call updateContainerReplica
  Pipeline pipeline = getRandomPipeline();
  getPipelineManager().addPipeline(pipeline);
  for (int i = 1; i <= 10; i++) {
    final ContainerInfo info = newContainerInfo(i, pipeline);
    containerManager.addNewContainer(new ContainerWithPipeline(info, pipeline));
  }
  containerManager.updateContainerReplica(containerID1, containerReplica1);
  // Should have 1 container entry in the replica history map
  Assert.assertEquals(1, repHistMap.size());
  // Should only have 1 entry for this replica (on DN01)
  Assert.assertEquals(1, repHistMap.get(cIDlong1).size());
  ContainerReplicaHistory repHist1 = repHistMap.get(cIDlong1).get(uuid1);
  Assert.assertEquals(uuid1, repHist1.getUuid());
  // Because this is a new entry, first seen time equals last seen time
  assertEquals(repHist1.getLastSeenTime(), repHist1.getFirstSeenTime());
  assertEquals(containerReplica1.getSequenceId().longValue(), repHist1.getBcsId());

  // Let's update the entry again
  containerReplica1 = ContainerReplica.newBuilder()
      .setContainerID(containerID1)
      .setContainerState(State.OPEN)
      .setDatanodeDetails(datanodeDetails1)
      .setSequenceId(1051L)
      .build();
  containerManager.updateContainerReplica(containerID1, containerReplica1);
  // Should still have 1 entry in the replica history map
  Assert.assertEquals(1, repHistMap.size());
  // Now last seen time should be larger than first seen time
  Assert.assertTrue(repHist1.getLastSeenTime() > repHist1.getFirstSeenTime());
  assertEquals(1051L, repHist1.getBcsId());

  // Init DN02
  final UUID uuid2 = UUID.randomUUID();
  final DatanodeDetails datanodeDetails2 = DatanodeDetails.newBuilder()
      .setUuid(uuid2)
      .setHostName("host2")
      .setIpAddress("127.0.0.2")
      .build();
  final ContainerReplica containerReplica2 = ContainerReplica.newBuilder()
      .setContainerID(containerID1)
      .setContainerState(State.OPEN)
      .setDatanodeDetails(datanodeDetails2)
      .setSequenceId(1051L)
      .build();
  // Add replica to DN02
  containerManager.updateContainerReplica(containerID1, containerReplica2);
  // Should still have 1 container entry in the replica history map
  Assert.assertEquals(1, repHistMap.size());
  // Should have 2 entries for this replica (on DN01 and DN02)
  Assert.assertEquals(2, repHistMap.get(cIDlong1).size());
  ContainerReplicaHistory repHist2 = repHistMap.get(cIDlong1).get(uuid2);
  Assert.assertEquals(uuid2, repHist2.getUuid());
  // Because this is a new entry, first seen time equals last seen time
  assertEquals(repHist2.getLastSeenTime(), repHist2.getFirstSeenTime());
  assertEquals(1051L, repHist2.getBcsId());

  // Remove replica from DN01
  containerManager.removeContainerReplica(containerID1, containerReplica1);
  // Should still have 1 container entry in the replica history map
  Assert.assertEquals(1, repHistMap.size());
  // Should have 1 entry for this replica
  Assert.assertEquals(1, repHistMap.get(cIDlong1).size());
  // And the only entry should match DN02
  Assert.assertEquals(uuid2, repHistMap.get(cIDlong1).keySet().iterator().next());
}
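For reference, the replica history map exercised above is keyed first by container ID and then by datanode UUID. A small illustrative helper for reading it might look like the following; getReplicaHistoryMap, ContainerReplicaHistory, and getLastSeenTime come from the snippets on this page, while the helper itself is hypothetical.

// Hypothetical helper: last-seen time of a container replica on a given datanode,
// or -1 if Recon has never recorded that replica.
private static long lastSeenOn(ReconContainerManager containerManager,
    long containerId, UUID datanodeUuid) {
  Map<UUID, ContainerReplicaHistory> replicas =
      containerManager.getReplicaHistoryMap().get(containerId);
  ContainerReplicaHistory history =
      replicas == null ? null : replicas.get(datanodeUuid);
  return history == null ? -1L : history.getLastSeenTime();
}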
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project ozone by apache: class TestReconPipelineReportHandler, method testProcessPipelineReport.
@Test
public void testProcessPipelineReport() throws IOException {
  // Check with pipeline which does not exist in Recon.
  Pipeline pipeline = getRandomPipeline();
  PipelineID pipelineID = pipeline.getId();
  HddsProtos.PipelineID pipelineIDProto = pipelineID.getProtobuf();

  ReconPipelineManager reconPipelineManagerMock = mock(ReconPipelineManager.class);
  when(reconPipelineManagerMock.getPipeline(pipelineID)).thenReturn(pipeline);

  StorageContainerServiceProvider scmServiceProviderMock =
      mock(StorageContainerServiceProvider.class);
  when(scmServiceProviderMock.getPipeline(pipelineIDProto)).thenReturn(pipeline);

  OzoneConfiguration configuration = new OzoneConfiguration();
  ReconPipelineReportHandler handler = new ReconPipelineReportHandler(
      new ReconSafeModeManager(), reconPipelineManagerMock,
      SCMContext.emptyContext(), configuration, scmServiceProviderMock);

  EventPublisher eventPublisherMock = mock(EventPublisher.class);
  PipelineReport report = mock(PipelineReport.class);
  when(report.getPipelineID()).thenReturn(pipelineIDProto);

  handler.processPipelineReport(report, pipeline.getNodes().get(0), eventPublisherMock);

  // Verify that the new pipeline was added to pipeline manager.
  verify(reconPipelineManagerMock, times(1)).addPipeline(pipeline);
  verify(reconPipelineManagerMock, times(1)).getPipeline(pipelineID);

  // Check with pipeline which already exists in Recon.
  pipeline = getRandomPipeline();
  pipelineID = pipeline.getId();
  pipelineIDProto = pipelineID.getProtobuf();

  when(reconPipelineManagerMock.containsPipeline(pipelineID)).thenReturn(true);
  when(reconPipelineManagerMock.getPipeline(pipelineID)).thenReturn(pipeline);
  when(report.getPipelineID()).thenReturn(pipelineIDProto);

  handler.processPipelineReport(report, pipeline.getNodes().get(0), eventPublisherMock);

  // Verify that the pipeline was not added to pipeline manager.
  verify(reconPipelineManagerMock, times(0)).addPipeline(pipeline);
  verify(reconPipelineManagerMock, times(1)).getPipeline(pipelineID);
}
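As a side note, the report does not have to be a Mockito mock; a real protobuf message can be built instead. This is a hedged sketch that assumes pipelineID is the only field the PipelineReport builder needs to be populated; everything else reuses objects from the test above.

// Sketch: build a real PipelineReport rather than mocking one.
// Assumption: setting pipelineID alone is enough for the builder.
PipelineReport realReport = PipelineReport.newBuilder()
    .setPipelineID(pipeline.getId().getProtobuf())
    .build();
handler.processPipelineReport(realReport, pipeline.getNodes().get(0), eventPublisherMock);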
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project ozone by apache: class ReconPipelineManager, method initializePipelines.
/**
* Bootstrap Recon's pipeline metadata with that from SCM.
* @param pipelinesFromScm pipelines from SCM.
* @throws IOException on exception.
*/
void initializePipelines(List<Pipeline> pipelinesFromScm) throws IOException {
  acquireWriteLock();
  try {
    List<Pipeline> pipelinesInHouse = getPipelines();
    LOG.info("Recon has {} pipelines in house.", pipelinesInHouse.size());
    for (Pipeline pipeline : pipelinesFromScm) {
      if (!containsPipeline(pipeline.getId())) {
        // New pipeline got from SCM. Recon does not know anything about it,
        // so let's add it.
        LOG.info("Adding new pipeline {} from SCM.", pipeline.getId());
        addPipeline(pipeline);
      } else {
        // Recon already has this pipeline. Just update state and creation
        // time.
        getStateManager().updatePipelineState(
            pipeline.getId().getProtobuf(),
            Pipeline.PipelineState.getProtobuf(pipeline.getPipelineState()));
        getPipeline(pipeline.getId())
            .setCreationTimestamp(pipeline.getCreationTimestamp());
      }
    }
    // After every SCM pipeline has been reconciled, drop the pipelines Recon
    // still holds that SCM no longer reports. This runs once, after the loop,
    // not on every iteration.
    removeInvalidPipelines(pipelinesFromScm);
  } finally {
    releaseWriteLock();
  }
}
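A hypothetical sketch of how a caller could use this method to bootstrap Recon's pipeline metadata at startup. Only initializePipelines and getPipelines come from the snippet above; fetchPipelinesFromScm is an illustrative stand-in for however the caller obtains SCM's current pipeline list.

// Hypothetical bootstrap flow (fetchPipelinesFromScm is a stand-in, not a real API).
List<Pipeline> pipelinesFromScm = fetchPipelinesFromScm();
reconPipelineManager.initializePipelines(pipelinesFromScm);
// After this call Recon holds every pipeline SCM reported, and any pipeline Recon
// knew about that SCM no longer reports has been closed and dropped.
LOG.info("Recon now tracks {} pipelines.", reconPipelineManager.getPipelines().size());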
Use of org.apache.hadoop.hdds.scm.pipeline.Pipeline in project ozone by apache: class ReconPipelineManager, method removeInvalidPipelines.
public void removeInvalidPipelines(List<Pipeline> pipelinesFromScm) {
  acquireWriteLock();
  try {
    List<Pipeline> pipelinesInHouse = getPipelines();
    // Removing pipelines in Recon that are no longer in SCM.
    // TODO Recon may need to track inactive pipelines as well. So this can be
    // removed in a followup JIRA.
    List<Pipeline> invalidPipelines = pipelinesInHouse.stream()
        .filter(p -> !pipelinesFromScm.contains(p))
        .collect(Collectors.toList());
    invalidPipelines.forEach(p -> {
      PipelineID pipelineID = p.getId();
      if (!p.getPipelineState().equals(CLOSED)) {
        try {
          getStateManager().updatePipelineState(
              pipelineID.getProtobuf(), HddsProtos.PipelineState.PIPELINE_CLOSED);
        } catch (IOException e) {
          LOG.warn("Pipeline {} not found while updating state.", pipelineID, e);
        }
      }
      try {
        LOG.info("Removing invalid pipeline {} from Recon.", pipelineID);
        closePipeline(p, false);
      } catch (IOException e) {
        LOG.warn("Unable to remove pipeline {}", pipelineID, e);
      }
    });
  } finally {
    releaseWriteLock();
  }
}
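A hedged test sketch for this method, using only calls that appear elsewhere on this page (getRandomPipeline, addPipeline, containsPipeline). The expectation that the Recon-only pipeline ends up fully removed, rather than merely closed, is an assumption about closePipeline's behavior here, not something the snippet states.

// Sketch: a pipeline SCM still reports must survive; a pipeline only Recon knows
// about is treated as invalid. The final assertion assumes removal, not just closing.
Pipeline scmPipeline = getRandomPipeline();
Pipeline reconOnlyPipeline = getRandomPipeline();
reconPipelineManager.addPipeline(scmPipeline);
reconPipelineManager.addPipeline(reconOnlyPipeline);
reconPipelineManager.removeInvalidPipelines(Collections.singletonList(scmPipeline));
Assert.assertTrue(reconPipelineManager.containsPipeline(scmPipeline.getId()));
Assert.assertFalse(reconPipelineManager.containsPipeline(reconOnlyPipeline.getId()));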