Example 71 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class RunRulesTest, method testDropTooManyInSameTier.

@Test
public void testDropTooManyInSameTier() {
    mockCoordinator();
    mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    mockEmptyPeon();
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
            .andReturn(Lists.newArrayList(
                new IntervalLoadRule(
                    Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                    ImmutableMap.of("normal", 1)
                ),
                new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
            ))
            .atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server1 = new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, "normal", 0);
    server1.addDataSegment(usedSegments.get(0));
    DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", null, 1000, ServerType.HISTORICAL, "normal", 0);
    for (DataSegment segment : usedSegments) {
        server2.addDataSegment(segment);
    }
    DruidCluster druidCluster = DruidClusterBuilder.newBuilder()
            .addTier(
                "normal",
                new ServerHolder(server1.toImmutableDruidServer(), mockPeon),
                new ServerHolder(server2.toImmutableDruidServer(), mockPeon)
            )
            .build();
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
            .withDynamicConfigs(
                CoordinatorDynamicConfig.builder()
                    .withLeadingTimeMillisBeforeCanMarkAsUnusedOvershadowedSegments(0L)
                    .build()
            )
            .withSegmentReplicantLookup(segmentReplicantLookup)
            .withBalancerStrategy(balancerStrategy)
            .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertEquals(1L, stats.getTieredStat("droppedCount", "normal"));
    Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
    exec.shutdown();
    EasyMock.verify(mockPeon);
}
Also used: IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule), DruidServer (org.apache.druid.client.DruidServer), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), IntervalDropRule (org.apache.druid.server.coordinator.rules.IntervalDropRule), DataSegment (org.apache.druid.timeline.DataSegment), Test (org.junit.Test)
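
The two assertions read different counter scopes, which is easy to miss. A minimal sketch of the distinction, reusing only calls that appear in the test above:

    // droppedCount is tracked per tier, so it is read with a tier name;
    // deletedCount is a cluster-wide counter, so it is read globally.
    long dropped = stats.getTieredStat("droppedCount", "normal"); // drops from the "normal" tier
    long deleted = stats.getGlobalStat("deletedCount");           // segments marked unused cluster-wide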

Example 72 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class RunRulesTest, method testDropRemove.

@Test
public void testDropRemove() {
    mockPeon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    mockEmptyPeon();
    EasyMock.expect(coordinator.getDynamicConfigs()).andReturn(createCoordinatorDynamicConfig()).anyTimes();
    coordinator.markSegmentAsUnused(EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    EasyMock.replay(coordinator);
    EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
            .andReturn(Lists.newArrayList(
                new IntervalLoadRule(
                    Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
                    ImmutableMap.of("normal", 1)
                ),
                new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
            ))
            .atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server = new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, "normal", 0);
    for (DataSegment segment : usedSegments) {
        server.addDataSegment(segment);
    }
    DruidCluster druidCluster = DruidClusterBuilder.newBuilder()
            .addTier("normal", new ServerHolder(server.toImmutableDruidServer(), mockPeon))
            .build();
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
            .withDynamicConfigs(COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
            .withSegmentReplicantLookup(segmentReplicantLookup)
            .withBalancerStrategy(balancerStrategy)
            .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
    exec.shutdown();
    EasyMock.verify(coordinator);
}
Also used: IntervalLoadRule (org.apache.druid.server.coordinator.rules.IntervalLoadRule), DruidServer (org.apache.druid.client.DruidServer), ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService), IntervalDropRule (org.apache.druid.server.coordinator.rules.IntervalDropRule), DataSegment (org.apache.druid.timeline.DataSegment), Test (org.junit.Test)
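
Both RunRulesTest methods above follow EasyMock's standard record/replay/verify lifecycle. A minimal sketch of that pattern in isolation (the peon variable is illustrative, not taken from the test):

    // Record phase: declare the calls the mock must receive.
    LoadQueuePeon peon = EasyMock.createMock(LoadQueuePeon.class);
    peon.dropSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce(); // void method, so qualify the preceding call
    // Replay phase: the mock now enforces the recorded expectations.
    EasyMock.replay(peon);
    // ... exercise the code under test ...
    EasyMock.verify(peon); // fails the test if an expected call never happened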

Example 73 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class BroadcastDistributionRuleTest, method setUp.

@Before
public void setUp() {
    smallSegment = new DataSegment("small_source", Intervals.of("0/1000"), DateTimes.nowUtc().toString(),
            new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 0);
    for (int i = 0; i < 3; i++) {
        largeSegments.add(new DataSegment("large_source", Intervals.of((i * 1000) + "/" + ((i + 1) * 1000)),
                DateTimes.nowUtc().toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
                NoneShardSpec.instance(), 0, 100));
    }
    for (int i = 0; i < 2; i++) {
        largeSegments2.add(new DataSegment("large_source2", Intervals.of((i * 1000) + "/" + ((i + 1) * 1000)),
                DateTimes.nowUtc().toString(), new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
                NoneShardSpec.instance(), 0, 100));
    }
    holderOfSmallSegment = new ServerHolder(
            new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 0)
                    .addDataSegment(smallSegment).toImmutableDruidServer(), new LoadQueuePeonTester());
    holdersOfLargeSegments.add(new ServerHolder(
            new DruidServer("serverHot1", "hostHot1", null, 1000, ServerType.HISTORICAL, "hot", 0)
                    .addDataSegment(largeSegments.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments.add(new ServerHolder(
            new DruidServer("serverNorm1", "hostNorm1", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
                    .addDataSegment(largeSegments.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments.add(new ServerHolder(
            new DruidServer("serverNorm2", "hostNorm2", null, 100, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
                    .addDataSegment(largeSegments.get(2)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments2.add(new ServerHolder(
            new DruidServer("serverHot3", "hostHot3", null, 1000, ServerType.HISTORICAL, "hot", 0)
                    .addDataSegment(largeSegments2.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    holdersOfLargeSegments2.add(new ServerHolder(
            new DruidServer("serverNorm3", "hostNorm3", null, 100, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0)
                    .addDataSegment(largeSegments2.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester()));
    activeServer = new ServerHolder(
            new DruidServer("active", "host1", null, 100, ServerType.HISTORICAL, "tier1", 0)
                    .addDataSegment(largeSegments.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester());
    // The trailing "true" flag marks these two servers as decommissioning.
    decommissioningServer1 = new ServerHolder(
            new DruidServer("decommissioning1", "host2", null, 100, ServerType.HISTORICAL, "tier1", 0)
                    .addDataSegment(smallSegment).toImmutableDruidServer(), new LoadQueuePeonTester(), true);
    decommissioningServer2 = new ServerHolder(
            new DruidServer("decommissioning2", "host3", null, 100, ServerType.HISTORICAL, "tier1", 0)
                    .addDataSegment(largeSegments.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester(), true);
    druidCluster = DruidClusterBuilder.newBuilder()
            .addTier("hot", holdersOfLargeSegments.get(0), holderOfSmallSegment, holdersOfLargeSegments2.get(0))
            .addTier(DruidServer.DEFAULT_TIER, holdersOfLargeSegments.get(1), holdersOfLargeSegments.get(2),
                    holdersOfLargeSegments2.get(1))
            .build();
    secondCluster = DruidClusterBuilder.newBuilder()
            .addTier("tier1", activeServer, decommissioningServer1, decommissioningServer2)
            .build();
}
Also used: ServerHolder (org.apache.druid.server.coordinator.ServerHolder), HashMap (java.util.HashMap), ArrayList (java.util.ArrayList), DruidServer (org.apache.druid.client.DruidServer), DataSegment (org.apache.druid.timeline.DataSegment), LoadQueuePeonTester (org.apache.druid.server.coordinator.LoadQueuePeonTester), Before (org.junit.Before)
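
The nine positional arguments of the DataSegment constructor used throughout this setUp are easy to misread. Here is the smallSegment construction again with each argument annotated (the comments are descriptive labels, a best guess rather than the exact parameter names):

    smallSegment = new DataSegment(
            "small_source",                 // dataSource
            Intervals.of("0/1000"),         // interval covered by the segment
            DateTimes.nowUtc().toString(),  // version
            new HashMap<>(),                // loadSpec
            new ArrayList<>(),              // dimensions
            new ArrayList<>(),              // metrics
            NoneShardSpec.instance(),       // shardSpec
            0,                              // binaryVersion
            0);                             // size in bytes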

Example 74 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class IntervalsResourceTest, method setUp.

@Before
public void setUp() {
    inventoryView = EasyMock.createStrictMock(InventoryView.class);
    server = EasyMock.createStrictMock(DruidServer.class);
    request = EasyMock.createStrictMock(HttpServletRequest.class);
    dataSegmentList = new ArrayList<>();
    dataSegmentList.add(new DataSegment("datasource1", Intervals.of("2010-01-01T00:00:00.000Z/P1D"), "", null, null, null, null, 0x9, 20));
    dataSegmentList.add(new DataSegment("datasource1", Intervals.of("2010-01-22T00:00:00.000Z/P1D"), "", null, null, null, null, 0x9, 10));
    dataSegmentList.add(new DataSegment("datasource2", Intervals.of("2010-01-01T00:00:00.000Z/P1D"), "", null, null, null, null, 0x9, 5));
    // Replaces the strict mock created above with a concrete DruidServer.
    server = new DruidServer("who", "host", null, 1234, ServerType.HISTORICAL, "tier1", 0);
    server.addDataSegment(dataSegmentList.get(0));
    server.addDataSegment(dataSegmentList.get(1));
    server.addDataSegment(dataSegmentList.get(2));
}
Also used: HttpServletRequest (javax.servlet.http.HttpServletRequest), InventoryView (org.apache.druid.client.InventoryView), DruidServer (org.apache.druid.client.DruidServer), DataSegment (org.apache.druid.timeline.DataSegment), Before (org.junit.Before)
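
The segments above give their intervals in ISO-8601 start/period form. A small sketch of the equivalence, assuming Intervals.of parses both forms against the same UTC chronology (an assumption, not verified here):

    // "start/P1D" is the one-day interval beginning at the start instant,
    // so these two should describe the same interval:
    Interval fromPeriod = Intervals.of("2010-01-01T00:00:00.000Z/P1D");
    Interval fromEndpoints = Intervals.of("2010-01-01T00:00:00.000Z/2010-01-02T00:00:00.000Z");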

Example 75 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class SegmentsCostCacheTest, method twoSegmentsCostTest.

@Test
public void twoSegmentsCostTest() {
    DataSegment segmentA = createSegment(DATA_SOURCE, shifted1HInterval(REFERENCE_TIME, 0), 100);
    DataSegment segmentB = createSegment(DATA_SOURCE, shifted1HInterval(REFERENCE_TIME, -2), 100);
    SegmentsCostCache.Bucket.Builder prototype = SegmentsCostCache.Bucket.builder(
            new Interval(REFERENCE_TIME.minusHours(5), REFERENCE_TIME.plusHours(5)));
    prototype.addSegment(segmentA);
    SegmentsCostCache.Bucket bucket = prototype.build();
    double segmentCost = bucket.cost(segmentB);
    Assert.assertEquals(7.8735899489011E-4, segmentCost, EPSILON);
}
Also used: DataSegment (org.apache.druid.timeline.DataSegment), Interval (org.joda.time.Interval), Test (org.junit.Test)
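
The shifted1HInterval helper is not shown in this excerpt. A plausible reconstruction based on its name and how it is called above (a guess, not the actual source):

    // Hypothetical helper: an hour-long interval offset from the reference
    // time by the given number of hours.
    private static Interval shifted1HInterval(DateTime referenceTime, int hoursShift) {
        return new Interval(referenceTime.plusHours(hoursShift),
                referenceTime.plusHours(hoursShift + 1));
    }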

Aggregations

Classes most often used together with DataSegment across these examples, with usage counts:

DataSegment (org.apache.druid.timeline.DataSegment): 612
Test (org.junit.Test): 386
ArrayList (java.util.ArrayList): 161
Interval (org.joda.time.Interval): 158
File (java.io.File): 138
Map (java.util.Map): 110
List (java.util.List): 108
ImmutableList (com.google.common.collect.ImmutableList): 77
IOException (java.io.IOException): 77
HashMap (java.util.HashMap): 74
ImmutableMap (com.google.common.collect.ImmutableMap): 72
NumberedShardSpec (org.apache.druid.timeline.partition.NumberedShardSpec): 68
HashSet (java.util.HashSet): 58
TaskStatus (org.apache.druid.indexer.TaskStatus): 53
Collectors (java.util.stream.Collectors): 52
Set (java.util.Set): 50
CountDownLatch (java.util.concurrent.CountDownLatch): 50
ISE (org.apache.druid.java.util.common.ISE): 50
SegmentId (org.apache.druid.timeline.SegmentId): 47
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 45