
Example 1 with ReplicationManagerConfiguration

Use of org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration in the Apache Ozone project.

From the class TestContainerReplicationEndToEnd, method init().

/**
 * Create a MiniOzoneCluster for testing.
 *
 * @throws Exception
 */
@BeforeClass
public static void init() throws Exception {
    conf = new OzoneConfiguration();
    path = GenericTestUtils.getTempPath(TestContainerStateMachineFailures.class.getSimpleName());
    File baseDir = new File(path);
    baseDir.mkdirs();
    containerReportInterval = 2000;
    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, containerReportInterval, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 5 * containerReportInterval, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL, 10 * containerReportInterval, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1000, TimeUnit.SECONDS);
    DatanodeRatisServerConfig ratisServerConfig = conf.getObject(DatanodeRatisServerConfig.class);
    ratisServerConfig.setFollowerSlownessTimeout(Duration.ofSeconds(1000));
    ratisServerConfig.setNoLeaderTimeout(Duration.ofSeconds(1000));
    conf.setFromObject(ratisServerConfig);
    ReplicationManagerConfiguration replicationConf = conf.getObject(ReplicationManagerConfiguration.class);
    replicationConf.setInterval(Duration.ofMillis(containerReportInterval));
    conf.setFromObject(replicationConf);
    conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
    conf.setQuietMode(false);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(4)
        .setTotalPipelineNumLimit(6)
        .setHbInterval(200)
        .build();
    cluster.waitForClusterToBeReady();
    cluster.getStorageContainerManager().getReplicationManager().start();
    // the easiest way to create an open container is to create a key
    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    xceiverClientManager = new XceiverClientManager(conf);
    volumeName = "testcontainerstatemachinefailures";
    bucketName = volumeName;
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
}
Also used : ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) XceiverClientManager(org.apache.hadoop.hdds.scm.XceiverClientManager) DatanodeRatisServerConfig(org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig) File(java.io.File) BeforeClass(org.junit.BeforeClass)
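
The part of this example worth isolating is the typed-configuration round trip: the ReplicationManagerConfiguration object is materialised from the OzoneConfiguration with getObject, adjusted through its setters, and written back with setFromObject so later readers see the change. A minimal sketch of just that round trip, using only the calls shown above, wrapped in a throwaway class for illustration; the getInterval() read-back at the end is an assumption and may differ between Ozone versions:

import java.time.Duration;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;

public final class ReplicationConfRoundTripSketch {

    public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Materialise the typed config object from the underlying configuration.
        ReplicationManagerConfiguration repConf =
                conf.getObject(ReplicationManagerConfiguration.class);
        // Tighten the replication check interval, then write it back.
        repConf.setInterval(Duration.ofSeconds(1));
        conf.setFromObject(repConf);
        // A fresh getObject reflects the change; the getInterval() getter is assumed here.
        System.out.println(
                conf.getObject(ReplicationManagerConfiguration.class).getInterval());
    }
}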

Example 2 with ReplicationManagerConfiguration

Use of org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration in the Apache Ozone project.

From the class TestInputStreamBase, method init().

/**
 * Create a MiniOzoneCluster for testing.
 * @throws Exception
 */
@Before
public void init() throws Exception {
    OzoneClientConfig config = new OzoneClientConfig();
    config.setBytesPerChecksum(BYTES_PER_CHECKSUM);
    conf.setFromObject(config);
    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
    conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, TimeUnit.SECONDS);
    conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 1);
    conf.setQuietMode(false);
    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 64, StorageUnit.MB);
    conf.set(ScmConfigKeys.OZONE_SCM_CONTAINER_LAYOUT_KEY, containerLayout.toString());
    ReplicationManagerConfiguration repConf = conf.getObject(ReplicationManagerConfiguration.class);
    repConf.setInterval(Duration.ofSeconds(1));
    conf.setFromObject(repConf);
    cluster = MiniOzoneCluster.newBuilder(conf)
        .setNumDatanodes(4)
        .setTotalPipelineNumLimit(5)
        .setBlockSize(BLOCK_SIZE)
        .setChunkSize(CHUNK_SIZE)
        .setStreamBufferFlushSize(FLUSH_SIZE)
        .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
        .setStreamBufferSizeUnit(StorageUnit.BYTES)
        .build();
    cluster.waitForClusterToBeReady();
    // the easiest way to create an open container is to create a key
    client = OzoneClientFactory.getRpcClient(conf);
    objectStore = client.getObjectStore();
    volumeName = UUID.randomUUID().toString();
    bucketName = UUID.randomUUID().toString();
    keyString = UUID.randomUUID().toString();
    objectStore.createVolume(volumeName);
    objectStore.getVolume(volumeName).createBucket(bucketName);
}
Also used : ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) OzoneClientConfig(org.apache.hadoop.hdds.scm.OzoneClientConfig) Before(org.junit.Before)
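
Both init() methods drive ReplicationManagerConfiguration through the typed object; the same interval can normally also be set through the underlying string key that the configuration annotations bind. The key name used below, hdds.scm.replication.thread.interval, is an assumption about the configuration-group prefix and is not shown in these examples:

import java.util.concurrent.TimeUnit;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public final class RawIntervalKeySketch {

    public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Assumed key: "hdds.scm.replication" group prefix + "thread.interval".
        conf.setTimeDuration("hdds.scm.replication.thread.interval", 1, TimeUnit.SECONDS);
        // Read the raw value back to confirm what was stored.
        System.out.println(conf.get("hdds.scm.replication.thread.interval"));
    }
}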

Example 3 with ReplicationManagerConfiguration

Use of org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration in the Apache Ozone project.

From the class TestReplicationManager, method setup().

@Before
public void setup() throws IOException, InterruptedException, NodeNotFoundException, InvalidStateTransitionException {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setTimeDuration(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, 0, TimeUnit.SECONDS);
    scmLogs = GenericTestUtils.LogCapturer.captureLogs(ReplicationManager.LOG);
    containerManager = Mockito.mock(ContainerManager.class);
    nodeManager = new SimpleMockNodeManager();
    eventQueue = new EventQueue();
    scmhaManager = MockSCMHAManager.getInstance(true);
    testDir = GenericTestUtils.getTestDir(TestContainerManagerImpl.class.getSimpleName() + UUID.randomUUID());
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
    dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
    pipelineManager = Mockito.mock(PipelineManager.class);
    when(pipelineManager.containsPipeline(Mockito.any(PipelineID.class))).thenReturn(true);
    containerStateManager = ContainerStateManagerImpl.newBuilder()
        .setConfiguration(conf)
        .setPipelineManager(pipelineManager)
        .setRatisServer(scmhaManager.getRatisServer())
        .setContainerStore(SCMDBDefinition.CONTAINERS.getTable(dbStore))
        .setSCMDBTransactionBuffer(scmhaManager.getDBTransactionBuffer())
        .build();
    serviceManager = new SCMServiceManager();
    datanodeCommandHandler = new DatanodeCommandHandler();
    eventQueue.addHandler(SCMEvents.DATANODE_COMMAND, datanodeCommandHandler);
    Mockito.when(containerManager.getContainers()).thenAnswer(invocation -> {
        Set<ContainerID> ids = containerStateManager.getContainerIDs();
        List<ContainerInfo> containers = new ArrayList<>();
        for (ContainerID id : ids) {
            containers.add(containerStateManager.getContainer(id));
        }
        return containers;
    });
    Mockito.when(containerManager.getContainer(Mockito.any(ContainerID.class)))
        .thenAnswer(invocation -> containerStateManager.getContainer(
            (ContainerID) invocation.getArguments()[0]));
    Mockito.when(containerManager.getContainerReplicas(Mockito.any(ContainerID.class)))
        .thenAnswer(invocation -> containerStateManager.getContainerReplicas(
            (ContainerID) invocation.getArguments()[0]));
    containerPlacementPolicy = Mockito.mock(PlacementPolicy.class);
    Mockito.when(containerPlacementPolicy.chooseDatanodes(Mockito.any(), Mockito.any(), Mockito.anyInt(), Mockito.anyLong(), Mockito.anyLong())).thenAnswer(invocation -> {
        int count = (int) invocation.getArguments()[2];
        return IntStream.range(0, count).mapToObj(i -> randomDatanodeDetails()).collect(Collectors.toList());
    });
    Mockito.when(containerPlacementPolicy.validateContainerPlacement(Mockito.any(), Mockito.anyInt())).thenAnswer(invocation -> new ContainerPlacementStatusDefault(2, 2, 3));
    clock = new TestClock(Instant.now(), ZoneId.of("UTC"));
    createReplicationManager(new ReplicationManagerConfiguration());
}
Also used : HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) MockDatanodeDetails.randomDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails) EventQueue(org.apache.hadoop.hdds.server.events.EventQueue) TimeoutException(java.util.concurrent.TimeoutException) STALE(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE) DBStore(org.apache.hadoop.hdds.utils.db.DBStore) SCMCommandProto(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto) HddsTestUtils.getContainer(org.apache.hadoop.hdds.scm.HddsTestUtils.getContainer) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) EventPublisher(org.apache.hadoop.hdds.server.events.EventPublisher) After(org.junit.After) Map(java.util.Map) SCMHAManager(org.apache.hadoop.hdds.scm.ha.SCMHAManager) HddsConfigKeys(org.apache.hadoop.hdds.HddsConfigKeys) ContainerPlacementStatusDefault(org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault) FileUtil(org.apache.hadoop.fs.FileUtil) DBStoreBuilder(org.apache.hadoop.hdds.utils.db.DBStoreBuilder) Longs(com.google.common.primitives.Longs) Set(java.util.Set) UUID(java.util.UUID) IN_SERVICE(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE) Instant(java.time.Instant) HddsTestUtils.getReplicas(org.apache.hadoop.hdds.scm.HddsTestUtils.getReplicas) Collectors(java.util.stream.Collectors) CommandForDatanode(org.apache.hadoop.ozone.protocol.commands.CommandForDatanode) ZoneId(java.time.ZoneId) PlacementPolicy(org.apache.hadoop.hdds.scm.PlacementPolicy) List(java.util.List) MoveDataNodePair(org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair) ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) SCMServiceManager(org.apache.hadoop.hdds.scm.ha.SCMServiceManager) Optional(java.util.Optional) DECOMMISSIONED(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED) GenericTestUtils(org.apache.ozone.test.GenericTestUtils) IntStream(java.util.stream.IntStream) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) InvalidStateTransitionException(org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException) MoveResult(org.apache.hadoop.hdds.scm.container.ReplicationManager.MoveResult) SCMContext(org.apache.hadoop.hdds.scm.ha.SCMContext) HashMap(java.util.HashMap) CompletableFuture(java.util.concurrent.CompletableFuture) ArrayList(java.util.ArrayList) LifeCycleState(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState) MockDatanodeDetails.createDatanodeDetails(org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails) MockSCMHAManager(org.apache.hadoop.hdds.scm.ha.MockSCMHAManager) NodeNotFoundException(org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException) DECOMMISSIONING(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING) Before(org.junit.Before) LifeCycleEvent(org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent) HEALTHY(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY) State(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State) DatanodeDetails(org.apache.hadoop.hdds.protocol.DatanodeDetails) FileUtils(org.apache.commons.io.FileUtils) EventHandler(org.apache.hadoop.hdds.server.events.EventHandler) Test(org.junit.Test) 
IOException(java.io.IOException) Mockito.when(org.mockito.Mockito.when) SCMEvents(org.apache.hadoop.hdds.scm.events.SCMEvents) PipelineManager(org.apache.hadoop.hdds.scm.pipeline.PipelineManager) File(java.io.File) ExecutionException(java.util.concurrent.ExecutionException) TimeUnit(java.util.concurrent.TimeUnit) IN_MAINTENANCE(org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE) Mockito(org.mockito.Mockito) SCMDBDefinition(org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition) CLOSED(org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED) SCMDBTransactionBufferImpl(org.apache.hadoop.hdds.scm.metadata.SCMDBTransactionBufferImpl) PipelineID(org.apache.hadoop.hdds.scm.pipeline.PipelineID) Assert(org.junit.Assert) TestClock(org.apache.ozone.test.TestClock)
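
Examples 4 and 5 below stop the manager built here and recreate it with a non-default configuration; the relevant detail is that ReplicationManagerConfiguration can also be constructed directly and tuned through its setters before being handed to a factory such as the createReplicationManager helper used in these tests. A minimal sketch of that construction, using only setters that appear elsewhere on this page:

import java.time.Duration;

import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;

public final class DirectReplicationConfSketch {

    public static void main(String[] args) {
        ReplicationManagerConfiguration rmConf = new ReplicationManagerConfiguration();
        // Treat one available replica as sufficient while the others are in maintenance.
        rmConf.setMaintenanceReplicaMinimum(1);
        // Run the replication check frequently so a test observes its effect quickly.
        rmConf.setInterval(Duration.ofSeconds(1));
        // In the tests below, an object like this is passed to createReplicationManager(...).
        System.out.println("maintenance minimum and interval overridden");
    }
}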

Example 4 with ReplicationManagerConfiguration

Use of org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration in the Apache Ozone project.

From the class TestReplicationManager, method testNotUnderReplicatedDueToMaintenanceMinRepOne().

/**
 * ReplicationManager should not schedule an additional replica when the
 * maintenance replica minimum is 1 and another replica is available.
 */
@Test
public void testNotUnderReplicatedDueToMaintenanceMinRepOne() throws Exception {
    replicationManager.stop();
    ReplicationManagerConfiguration newConf = new ReplicationManagerConfiguration();
    newConf.setMaintenanceReplicaMinimum(1);
    dbStore.close();
    createReplicationManager(newConf);
    final ContainerInfo container = createContainer(LifeCycleState.CLOSED);
    addReplica(container, new NodeStatus(IN_SERVICE, HEALTHY), CLOSED);
    addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED);
    addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED);
    assertReplicaScheduled(0);
    assertUnderReplicatedCount(0);
}
Also used : ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) Test(org.junit.Test)

Example 5 with ReplicationManagerConfiguration

Use of org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration in the Apache Ozone project.

From the class TestReplicationManager, method testUnderReplicatedDueToMaintenanceMinRepOne().

/**
 * ReplicationManager should schedule an additional replica when all copies
 * are going offline and the maintenance replica minimum is 1.
 */
@Test
public void testUnderReplicatedDueToMaintenanceMinRepOne() throws Exception {
    replicationManager.stop();
    ReplicationManagerConfiguration newConf = new ReplicationManagerConfiguration();
    newConf.setMaintenanceReplicaMinimum(1);
    dbStore.close();
    createReplicationManager(newConf);
    final ContainerInfo container = createContainer(LifeCycleState.CLOSED);
    addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED);
    addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED);
    addReplica(container, new NodeStatus(IN_MAINTENANCE, HEALTHY), CLOSED);
    assertReplicaScheduled(1);
    assertUnderReplicatedCount(1);
}
Also used : ReplicationManagerConfiguration(org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) NodeStatus(org.apache.hadoop.hdds.scm.node.NodeStatus) Test(org.junit.Test)

Aggregations

ReplicationManagerConfiguration (org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration) 11
Test (org.junit.Test) 6
OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration) 5
NodeStatus (org.apache.hadoop.hdds.scm.node.NodeStatus) 5
Before (org.junit.Before) 3
File (java.io.File) 2
Longs (com.google.common.primitives.Longs) 1
IOException (java.io.IOException) 1
Instant (java.time.Instant) 1
ZoneId (java.time.ZoneId) 1
ArrayList (java.util.ArrayList) 1
HashMap (java.util.HashMap) 1
List (java.util.List) 1
Map (java.util.Map) 1
Optional (java.util.Optional) 1
Set (java.util.Set) 1
UUID (java.util.UUID) 1
CompletableFuture (java.util.concurrent.CompletableFuture) 1
ExecutionException (java.util.concurrent.ExecutionException) 1
TimeUnit (java.util.concurrent.TimeUnit) 1