
Example 21 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by Apache.

From the class TestReportPublisherFactory, method testInvalidReportPublisher.

@Test
public void testInvalidReportPublisher() {
    OzoneConfiguration conf = new OzoneConfiguration();
    ReportPublisherFactory factory = new ReportPublisherFactory(conf);
    exception.expect(RuntimeException.class);
    exception.expectMessage("No publisher found for report");
    factory.getPublisherFor(HddsProtos.DatanodeDetailsProto.class);
}
Also used : HddsProtos(org.apache.hadoop.hdds.protocol.proto.HddsProtos) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Test(org.junit.Test)
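
The exception field is not part of this snippet. In JUnit 4 it is typically an org.junit.rules.ExpectedException rule declared on the test class; a minimal sketch of that assumed fixture:

// Assumed fixture for the example above (not shown on this page): a JUnit 4
// rule that lets the test declare the expected exception type and message
// via expect() and expectMessage().
@Rule
public ExpectedException exception = ExpectedException.none();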

Example 22 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by Apache.

From the class TestDatanodeConfiguration, method overridesInvalidValues.

@Test
public void overridesInvalidValues() {
    // GIVEN
    int invalidDeleteThreads = 0;
    long invalidDiskCheckIntervalMinutes = -1;
    int invalidFailedVolumesTolerated = -2;
    long invalidDiskCheckMinGap = -1;
    long invalidDiskCheckTimeout = -1;
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, invalidDeleteThreads);
    conf.setLong(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_KEY, invalidDiskCheckIntervalMinutes);
    conf.setInt(FAILED_DATA_VOLUMES_TOLERATED_KEY, invalidFailedVolumesTolerated);
    conf.setInt(FAILED_METADATA_VOLUMES_TOLERATED_KEY, invalidFailedVolumesTolerated);
    conf.setInt(FAILED_DB_VOLUMES_TOLERATED_KEY, invalidFailedVolumesTolerated);
    conf.setTimeDuration(DISK_CHECK_MIN_GAP_KEY, invalidDiskCheckMinGap, TimeUnit.MINUTES);
    conf.setTimeDuration(DISK_CHECK_TIMEOUT_KEY, invalidDiskCheckTimeout, TimeUnit.MINUTES);
    // WHEN
    DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
    // THEN
    assertEquals(CONTAINER_DELETE_THREADS_DEFAULT, subject.getContainerDeleteThreads());
    assertEquals(PERIODIC_DISK_CHECK_INTERVAL_MINUTES_DEFAULT, subject.getPeriodicDiskCheckIntervalMinutes());
    assertEquals(FAILED_VOLUMES_TOLERATED_DEFAULT, subject.getFailedDataVolumesTolerated());
    assertEquals(FAILED_VOLUMES_TOLERATED_DEFAULT, subject.getFailedMetadataVolumesTolerated());
    assertEquals(FAILED_VOLUMES_TOLERATED_DEFAULT, subject.getFailedDbVolumesTolerated());
    assertEquals(DISK_CHECK_MIN_GAP_DEFAULT, subject.getDiskCheckMinGap().toMillis());
    assertEquals(DISK_CHECK_TIMEOUT_DEFAULT, subject.getDiskCheckTimeout().toMillis());
}
Also used : OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Test(org.junit.Test)
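
For contrast, the same getObject() round trip can be sketched with in-range values, assuming the same static imports as the example above and that valid settings are passed through to DatanodeConfiguration unchanged (the concrete numbers are illustrative, not Ozone defaults):

@Test
public void acceptsValidValuesSketch() {
    // GIVEN: hypothetical, in-range settings
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setInt(CONTAINER_DELETE_THREADS_MAX_KEY, 4);
    conf.setInt(FAILED_DATA_VOLUMES_TOLERATED_KEY, 2);
    conf.setTimeDuration(DISK_CHECK_MIN_GAP_KEY, 15, TimeUnit.MINUTES);
    // WHEN
    DatanodeConfiguration subject = conf.getObject(DatanodeConfiguration.class);
    // THEN: the configured values should be retained as-is
    assertEquals(4, subject.getContainerDeleteThreads());
    assertEquals(2, subject.getFailedDataVolumesTolerated());
    assertEquals(TimeUnit.MINUTES.toMillis(15), subject.getDiskCheckMinGap().toMillis());
}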

Example 23 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by Apache.

From the class TestStateContext, method testContainerNodePipelineReportAPIs.

/**
 * Check if Container, Node and Pipeline report APIs work as expected.
 */
@Test
public void testContainerNodePipelineReportAPIs() {
    OzoneConfiguration conf = new OzoneConfiguration();
    DatanodeStateMachine datanodeStateMachineMock = mock(DatanodeStateMachine.class);
    // ContainerReports
    StateContext context1 = newStateContext(conf, datanodeStateMachineMock);
    assertNull(context1.getContainerReports());
    assertNull(context1.getNodeReport());
    assertNull(context1.getPipelineReports());
    GeneratedMessage containerReports = newMockReport(StateContext.CONTAINER_REPORTS_PROTO_NAME);
    context1.refreshFullReport(containerReports);
    assertNotNull(context1.getContainerReports());
    assertEquals(StateContext.CONTAINER_REPORTS_PROTO_NAME, context1.getContainerReports().getDescriptorForType().getFullName());
    assertNull(context1.getNodeReport());
    assertNull(context1.getPipelineReports());
    // NodeReport
    StateContext context2 = newStateContext(conf, datanodeStateMachineMock);
    GeneratedMessage nodeReport = newMockReport(StateContext.NODE_REPORT_PROTO_NAME);
    context2.refreshFullReport(nodeReport);
    assertNull(context2.getContainerReports());
    assertNotNull(context2.getNodeReport());
    assertEquals(StateContext.NODE_REPORT_PROTO_NAME, context2.getNodeReport().getDescriptorForType().getFullName());
    assertNull(context2.getPipelineReports());
    // PipelineReports
    StateContext context3 = newStateContext(conf, datanodeStateMachineMock);
    GeneratedMessage pipelineReports = newMockReport(StateContext.PIPELINE_REPORTS_PROTO_NAME);
    context3.refreshFullReport(pipelineReports);
    assertNull(context3.getContainerReports());
    assertNull(context3.getNodeReport());
    assertNotNull(context3.getPipelineReports());
    assertEquals(StateContext.PIPELINE_REPORTS_PROTO_NAME, context3.getPipelineReports().getDescriptorForType().getFullName());
}
Also used : OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) GeneratedMessage(com.google.protobuf.GeneratedMessage) Test(org.junit.Test)
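
The newStateContext() and newMockReport() helpers are not shown on this page. Judging from the StateContext constructor call in Example 24 below, newStateContext() is presumably a thin wrapper; a hypothetical reconstruction:

// Hypothetical reconstruction of the newStateContext() helper used above,
// mirroring the StateContext constructor call shown in Example 24.
private StateContext newStateContext(OzoneConfiguration conf,
        DatanodeStateMachine datanodeStateMachine) {
    return new StateContext(conf, DatanodeStates.getInitState(),
            datanodeStateMachine);
}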

Example 24 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by Apache.

From the class TestStateContext, method testReportAPIs.

@Test
public void testReportAPIs() {
    OzoneConfiguration conf = new OzoneConfiguration();
    DatanodeStateMachine datanodeStateMachineMock = mock(DatanodeStateMachine.class);
    StateContext stateContext = new StateContext(conf, DatanodeStates.getInitState(), datanodeStateMachineMock);
    InetSocketAddress scm1 = new InetSocketAddress("scm1", 9001);
    InetSocketAddress scm2 = new InetSocketAddress("scm2", 9001);
    GeneratedMessage generatedMessage = newMockReport(StateContext.COMMAND_STATUS_REPORTS_PROTO_NAME);
    // Try to add a report before any endpoint is registered; it should not be stored.
    stateContext.addIncrementalReport(generatedMessage);
    assertTrue(stateContext.getAllAvailableReports(scm1).isEmpty());
    // Add 2 scm endpoints.
    stateContext.addEndpoint(scm1);
    stateContext.addEndpoint(scm2);
    // Add report. Should be added to all endpoints.
    stateContext.addIncrementalReport(generatedMessage);
    List<GeneratedMessage> allAvailableReports = stateContext.getAllAvailableReports(scm1);
    assertEquals(1, allAvailableReports.size());
    assertEquals(1, stateContext.getAllAvailableReports(scm2).size());
    // Assert the reports are no longer available.
    assertTrue(stateContext.getAllAvailableReports(scm1).isEmpty());
    // Put back reports.
    stateContext.putBackReports(allAvailableReports, scm1);
    assertFalse(stateContext.getAllAvailableReports(scm1).isEmpty());
}
Also used : InetSocketAddress(java.net.InetSocketAddress) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) GeneratedMessage(com.google.protobuf.GeneratedMessage) Test(org.junit.Test)
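
The queue semantics exercised above are what a report sender would rely on. The sketch below is an illustrative usage pattern only, not Ozone code: transmit() is a hypothetical transport call, and the point is that reports drained with getAllAvailableReports() can be re-queued with putBackReports() if the send fails.

// Illustrative usage pattern only; transmit() is hypothetical.
void sendReportsTo(StateContext ctx, InetSocketAddress scm) {
    // getAllAvailableReports() drains the reports queued for this endpoint.
    List<GeneratedMessage> reports = ctx.getAllAvailableReports(scm);
    try {
        transmit(reports, scm);
    } catch (IOException e) {
        // The send failed, so return the reports to this endpoint's queue.
        ctx.putBackReports(reports, scm);
    }
}

// Hypothetical transport call, assumed to throw IOException on failure.
void transmit(List<GeneratedMessage> reports, InetSocketAddress scm)
        throws IOException {
    // ... send the reports over the wire ...
}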

Example 25 with OzoneConfiguration

Use of org.apache.hadoop.hdds.conf.OzoneConfiguration in project ozone by Apache.

From the class TestStateContext, method testPutBackReports.

/**
 * Only accepted report types can be put back into the report queue.
 */
@Test
public void testPutBackReports() {
    OzoneConfiguration conf = new OzoneConfiguration();
    DatanodeStateMachine datanodeStateMachineMock = mock(DatanodeStateMachine.class);
    StateContext ctx = new StateContext(conf, DatanodeStates.getInitState(), datanodeStateMachineMock);
    InetSocketAddress scm1 = new InetSocketAddress("scm1", 9001);
    ctx.addEndpoint(scm1);
    InetSocketAddress scm2 = new InetSocketAddress("scm2", 9001);
    ctx.addEndpoint(scm2);
    Map<String, Integer> expectedReportCount = new HashMap<>();
    // Case 1: Put back an incremental report
    ctx.putBackReports(Collections.singletonList(newMockReport(StateContext.COMMAND_STATUS_REPORTS_PROTO_NAME)), scm1);
    // scm2 report queue should be empty
    checkReportCount(ctx.getAllAvailableReports(scm2), expectedReportCount);
    // Check scm1 queue
    expectedReportCount.put(StateContext.COMMAND_STATUS_REPORTS_PROTO_NAME, 1);
    checkReportCount(ctx.getAllAvailableReports(scm1), expectedReportCount);
    // getAllAvailableReports() dequeues the incremental reports, so reset the expected counts
    expectedReportCount.clear();
    ctx.putBackReports(Collections.singletonList(newMockReport(StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME)), scm2);
    // scm1 report queue should be empty
    checkReportCount(ctx.getAllAvailableReports(scm1), expectedReportCount);
    // Check scm2 queue
    expectedReportCount.put(StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 1);
    checkReportCount(ctx.getAllAvailableReports(scm2), expectedReportCount);
    // getAllAvailableReports() dequeues the incremental reports, so reset the expected counts
    expectedReportCount.clear();
    // Case 2: Put back mixed types of incremental reports
    ctx.putBackReports(Arrays.asList(
        newMockReport(StateContext.COMMAND_STATUS_REPORTS_PROTO_NAME),
        newMockReport(StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME),
        newMockReport(StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME),
        newMockReport(StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME),
        newMockReport(StateContext.COMMAND_STATUS_REPORTS_PROTO_NAME)), scm1);
    // scm2 report queue should be empty
    checkReportCount(ctx.getAllAvailableReports(scm2), expectedReportCount);
    // Check scm1 queue
    expectedReportCount.put(StateContext.COMMAND_STATUS_REPORTS_PROTO_NAME, 2);
    expectedReportCount.put(StateContext.INCREMENTAL_CONTAINER_REPORT_PROTO_NAME, 3);
    checkReportCount(ctx.getAllAvailableReports(scm1), expectedReportCount);
    // getAllAvailableReports() dequeues the incremental reports, so reset the expected counts
    expectedReportCount.clear();
}
Also used : AtomicInteger(java.util.concurrent.atomic.AtomicInteger) HashMap(java.util.HashMap) InetSocketAddress(java.net.InetSocketAddress) OzoneConfiguration(org.apache.hadoop.hdds.conf.OzoneConfiguration) Test(org.junit.Test)
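
The checkReportCount() helper is also not shown here. A hypothetical reconstruction consistent with how it is called above, assuming it tallies the returned reports by proto full name and compares the tallies to the expected map:

// Hypothetical reconstruction of the checkReportCount() helper used above.
private void checkReportCount(List<GeneratedMessage> reports,
        Map<String, Integer> expectedReportCount) {
    Map<String, Integer> actualCounts = new HashMap<>();
    for (GeneratedMessage report : reports) {
        String reportName = report.getDescriptorForType().getFullName();
        actualCounts.merge(reportName, 1, Integer::sum);
    }
    assertEquals(expectedReportCount, actualCounts);
}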

Aggregations

Classes most frequently used together with OzoneConfiguration in project ozone, with usage counts:

OzoneConfiguration (org.apache.hadoop.hdds.conf.OzoneConfiguration): 691 usages
Test (org.junit.Test): 269 usages
Before (org.junit.Before): 116 usages
File (java.io.File): 80 usages
BeforeClass (org.junit.BeforeClass): 75 usages
DatanodeDetails (org.apache.hadoop.hdds.protocol.DatanodeDetails): 70 usages
IOException (java.io.IOException): 56 usages
Test (org.junit.jupiter.api.Test): 50 usages
ArrayList (java.util.ArrayList): 49 usages
OmMetadataManagerImpl (org.apache.hadoop.ozone.om.OmMetadataManagerImpl): 35 usages
MockDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails): 34 usages
BeforeEach (org.junit.jupiter.api.BeforeEach): 34 usages
MockDatanodeDetails.randomDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails): 32 usages
UUID (java.util.UUID): 29 usages
InetSocketAddress (java.net.InetSocketAddress): 27 usages
ECReplicationConfig (org.apache.hadoop.hdds.client.ECReplicationConfig): 25 usages
ReplicationConfig (org.apache.hadoop.hdds.client.ReplicationConfig): 23 usages
Pipeline (org.apache.hadoop.hdds.scm.pipeline.Pipeline): 23 usages
MockDatanodeDetails.createDatanodeDetails (org.apache.hadoop.hdds.protocol.MockDatanodeDetails.createDatanodeDetails): 22 usages
HDDSLayoutVersionManager (org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager): 22 usages