
Example 66 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class CompactSegmentsTest, method createSegment:

private DataSegment createSegment(String dataSource, int startDay, boolean beforeNoon, int partition) {
    final ShardSpec shardSpec = shardSpecFactory.apply(partition, 2);
    final Interval interval = beforeNoon
        ? Intervals.of(StringUtils.format("2017-01-%02dT00:00:00/2017-01-%02dT12:00:00", startDay + 1, startDay + 1))
        : Intervals.of(StringUtils.format("2017-01-%02dT12:00:00/2017-01-%02dT00:00:00", startDay + 1, startDay + 2));
    return new DataSegment(dataSource, interval, "version", null, ImmutableList.of(), ImmutableList.of(), shardSpec, 0, 10L);
}
Also used : DataSegment(org.apache.druid.timeline.DataSegment) HashBasedNumberedShardSpec(org.apache.druid.timeline.partition.HashBasedNumberedShardSpec) ShardSpec(org.apache.druid.timeline.partition.ShardSpec) NumberedShardSpec(org.apache.druid.timeline.partition.NumberedShardSpec) SingleDimensionShardSpec(org.apache.druid.timeline.partition.SingleDimensionShardSpec) Interval(org.joda.time.Interval)
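For context, the createSegment helper above relies on a shardSpecFactory supplied by the surrounding parameterized test class. A minimal sketch of one way to wire it up, assuming a plain NumberedShardSpec factory; the field initializer, the test method, and the "wiki" data-source name are illustrative, not taken from the original CompactSegmentsTest:

// Illustrative only: one possible shardSpecFactory for the createSegment helper above.
private final BiFunction<Integer, Integer, ShardSpec> shardSpecFactory =
    (partitionId, totalPartitions) -> new NumberedShardSpec(partitionId, totalPartitions);

@Test
public void createsTwoPartitionsOfTheSameChunk() {
    // Two partitions of the same morning chunk (2017-01-01T00:00/12:00).
    final DataSegment firstPartition = createSegment("wiki", 0, true, 0);
    final DataSegment secondPartition = createSegment("wiki", 0, true, 1);
    Assert.assertEquals(firstPartition.getInterval(), secondPartition.getInterval());
}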

Example 67 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class RunRulesTest, method testRulesRunOnNonOvershadowedSegmentsOnly:

@Test
public void testRulesRunOnNonOvershadowedSegmentsOnly() {
    Set<DataSegment> usedSegments = new HashSet<>();
    DataSegment v1 = new DataSegment(
        "test", Intervals.of("2012-01-01/2012-01-02"), "1",
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
        NoneShardSpec.instance(), IndexIO.CURRENT_VERSION_ID, 1
    );
    DataSegment v2 = new DataSegment(
        "test", Intervals.of("2012-01-01/2012-01-02"), "2",
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
        NoneShardSpec.instance(), IndexIO.CURRENT_VERSION_ID, 1
    );
    usedSegments.add(v1);
    usedSegments.add(v2);
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.eq(v2), EasyMock.anyObject());
    EasyMock.expectLastCall().once();
    mockEmptyPeon();
    EasyMock
        .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
        .andReturn(Collections.singletonList(new ForeverLoadRule(ImmutableMap.of(DruidServer.DEFAULT_TIER, 1))))
        .atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            DruidServer.DEFAULT_TIER,
            new ServerHolder(
                new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).toImmutableDruidServer(),
                mockPeon
            )
        )
        .build();
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = CoordinatorRuntimeParamsTestHelpers
        .newBuilder()
        .withDruidCluster(druidCluster)
        .withUsedSegmentsInTest(usedSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(new DruidCluster(), false))
        .withBalancerStrategy(balancerStrategy)
        .withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(5).build())
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertEquals(1, stats.getTiers("assignedCount").size());
    Assert.assertEquals(1, stats.getTieredStat("assignedCount", "_default_tier"));
    Assert.assertTrue(stats.getTiers("unassignedCount").isEmpty());
    Assert.assertTrue(stats.getTiers("unassignedSize").isEmpty());
    Assert.assertEquals(2, usedSegments.size());
    Assert.assertEquals(usedSegments, params.getUsedSegments());
    Assert.assertEquals(usedSegments, afterParams.getUsedSegments());
    EasyMock.verify(mockPeon);
    exec.shutdown();
}
Also used : DruidServer(org.apache.druid.client.DruidServer) DataSegment(org.apache.druid.timeline.DataSegment) ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) HashSet(java.util.HashSet) Test(org.junit.Test)
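For reference, only v2 is loaded because v1 is fully overshadowed: both segments cover 2012-01-01/2012-01-02 and version "2" sorts above version "1". A minimal sketch of checking that relationship directly, assuming VersionedIntervalTimeline.forSegments and findFullyOvershadowed are available in this Druid version (the original test does not perform this check):

    // Illustrative check of the overshadow relationship the rule runner relies on.
    VersionedIntervalTimeline<String, DataSegment> timeline = VersionedIntervalTimeline.forSegments(usedSegments);
    // Exactly one holder (the v1 chunk) should be reported as fully overshadowed...
    Assert.assertEquals(1, timeline.findFullyOvershadowed().size());
    // ...and a lookup over the interval should surface only the v2 segment.
    Assert.assertEquals("2", timeline.lookup(Intervals.of("2012-01-01/2012-01-02")).get(0).getVersion());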

Example 68 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class RunRulesTest, method testTwoNodesOneTierThreeReplicantsRandomStrategyNotEnoughNodes:

/**
 * Tier - __default_tier
 * Nodes - 2
 * Replicants - 3
 * The random balancer strategy should not assign anything, and should not get stuck in a loop, because there are not enough nodes for the requested replication.
 */
@Test(timeout = 5000L)
public void testTwoNodesOneTierThreeReplicantsRandomStrategyNotEnoughNodes() {
    mockCoordinator();
    mockEmptyPeon();
    EasyMock
        .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
        .andReturn(Collections.singletonList(new ForeverLoadRule(ImmutableMap.of(DruidServer.DEFAULT_TIER, 3))))
        .atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DataSegment dataSegment = new DataSegment(
        "test", Intervals.utc(0, 1), DateTimes.nowUtc().toString(),
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
        NoneShardSpec.instance(), IndexIO.CURRENT_VERSION_ID, 1
    );
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            DruidServer.DEFAULT_TIER,
            new ServerHolder(
                new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(dataSegment).toImmutableDruidServer(),
                mockPeon
            ),
            new ServerHolder(
                new DruidServer("server2", "host2", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(dataSegment).toImmutableDruidServer(),
                mockPeon
            )
        )
        .build();
    RandomBalancerStrategy balancerStrategy = new RandomBalancerStrategy();
    DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, balancerStrategy, Collections.singletonList(dataSegment))
        .withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(5).build())
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertEquals(0L, stats.getTieredStat("assignedCount", DruidServer.DEFAULT_TIER));
    Assert.assertTrue(stats.getTiers("unassignedCount").isEmpty());
    Assert.assertTrue(stats.getTiers("unassignedSize").isEmpty());
    EasyMock.verify(mockPeon);
}
Also used : ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) DruidServer(org.apache.druid.client.DruidServer) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
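Note that makeCoordinatorRuntimeParams is a helper defined elsewhere in RunRulesTest and not shown in this listing. A plausible sketch of it, assuming it simply mirrors the builder chain spelled out in Example 67; this is a reconstruction, not the verbatim helper:

private DruidCoordinatorRuntimeParams.Builder makeCoordinatorRuntimeParams(
    DruidCluster druidCluster,
    BalancerStrategy balancerStrategy,
    List<DataSegment> dataSegments
) {
    // Sketch: the same builder chain as Example 67, parameterized over cluster, strategy, and segments.
    return CoordinatorRuntimeParamsTestHelpers
        .newBuilder()
        .withDruidCluster(druidCluster)
        .withUsedSegmentsInTest(dataSegments)
        .withDatabaseRuleManager(databaseRuleManager)
        .withSegmentReplicantLookup(SegmentReplicantLookup.make(druidCluster, false))
        .withBalancerStrategy(balancerStrategy);
}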

Example 69 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class RunRulesTest, method testDontDropInDifferentTiers:

@Test
public void testDontDropInDifferentTiers() {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    mockEmptyPeon();
    EasyMock
        .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
        .andReturn(
            Lists.newArrayList(
                new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"), ImmutableMap.of("hot", 1)),
                new IntervalDropRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"))
            )
        )
        .atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DruidServer server1 = new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, "hot", 0);
    DruidServer server2 = new DruidServer("serverNorm2", "hostNorm2", null, 1000, ServerType.HISTORICAL, "normal", 0);
    for (DataSegment segment : usedSegments) {
        server2.addDataSegment(segment);
    }
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier("hot", new ServerHolder(server1.toImmutableDruidServer(), mockPeon))
        .addTier("normal", new ServerHolder(server2.toImmutableDruidServer(), mockPeon))
        .build();
    SegmentReplicantLookup segmentReplicantLookup = SegmentReplicantLookup.make(druidCluster, false);
    ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
    BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
    DruidCoordinatorRuntimeParams params = createCoordinatorRuntimeParams(druidCluster)
        .withDynamicConfigs(COORDINATOR_CONFIG_WITH_ZERO_LEADING_TIME_BEFORE_CAN_MARK_AS_UNUSED_OVERSHADOWED_SEGMENTS)
        .withSegmentReplicantLookup(segmentReplicantLookup)
        .withBalancerStrategy(balancerStrategy)
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertTrue(stats.getTiers("droppedCount").isEmpty());
    Assert.assertEquals(12L, stats.getGlobalStat("deletedCount"));
    exec.shutdown();
    EasyMock.verify(mockPeon);
}
Also used : IntervalLoadRule(org.apache.druid.server.coordinator.rules.IntervalLoadRule) DruidServer(org.apache.druid.client.DruidServer) ListeningExecutorService(com.google.common.util.concurrent.ListeningExecutorService) IntervalDropRule(org.apache.druid.server.coordinator.rules.IntervalDropRule) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
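The rule list above is evaluated per segment, first match wins: a segment in the first half of 2012-01-01 is loaded onto the hot tier, while a segment later in the day falls through to the drop rule; the test asserts that this results in 12 deletions and no per-tier drops. A minimal sketch of that per-segment check using Rule#appliesTo, with an illustrative afternoon segment that is not one of the test's usedSegments:

    // Illustrative: the first rule whose interval applies to a segment decides its fate.
    final IntervalLoadRule loadRule = new IntervalLoadRule(
        Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"),
        ImmutableMap.of("hot", 1)
    );
    final IntervalDropRule dropRule = new IntervalDropRule(
        Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z")
    );
    final DataSegment afternoonSegment = new DataSegment(
        "test", Intervals.of("2012-01-01T13:00:00.000Z/2012-01-01T14:00:00.000Z"), "1",
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
        NoneShardSpec.instance(), IndexIO.CURRENT_VERSION_ID, 1
    );
    // The afternoon segment misses the load rule but is covered by the drop rule.
    Assert.assertFalse(loadRule.appliesTo(afternoonSegment, DateTimes.nowUtc()));
    Assert.assertTrue(dropRule.appliesTo(afternoonSegment, DateTimes.nowUtc()));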

Example 70 with DataSegment

Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.

From class RunRulesTest, method testOneNodesOneTierOneReplicantRandomStrategyEnoughSpace:

/**
 * Tier - __default_tier
 * Nodes - 1
 * Replicants - 1
 * Random balancer strategy should select the only node
 */
@Test(timeout = 5000L)
public void testOneNodesOneTierOneReplicantRandomStrategyEnoughSpace() {
    mockCoordinator();
    mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    mockEmptyPeon();
    EasyMock
        .expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject()))
        .andReturn(Collections.singletonList(new ForeverLoadRule(ImmutableMap.of(DruidServer.DEFAULT_TIER, 1))))
        .atLeastOnce();
    EasyMock.replay(databaseRuleManager);
    DataSegment dataSegment = new DataSegment(
        "test", Intervals.utc(0, 1), DateTimes.nowUtc().toString(),
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
        NoneShardSpec.instance(), IndexIO.CURRENT_VERSION_ID, 1
    );
    DruidCluster druidCluster = DruidClusterBuilder
        .newBuilder()
        .addTier(
            DruidServer.DEFAULT_TIER,
            new ServerHolder(
                new DruidServer("server1", "host1", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).toImmutableDruidServer(),
                mockPeon
            )
        )
        .build();
    RandomBalancerStrategy balancerStrategy = new RandomBalancerStrategy();
    DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, balancerStrategy, Collections.singletonList(dataSegment))
        .withDynamicConfigs(CoordinatorDynamicConfig.builder().withMaxSegmentsToMove(5).build())
        .build();
    DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
    CoordinatorStats stats = afterParams.getCoordinatorStats();
    Assert.assertEquals(1L, stats.getTieredStat("assignedCount", DruidServer.DEFAULT_TIER));
    Assert.assertTrue(stats.getTiers("unassignedCount").isEmpty());
    Assert.assertTrue(stats.getTiers("unassignedSize").isEmpty());
    EasyMock.verify(mockPeon);
}
Also used : ForeverLoadRule(org.apache.druid.server.coordinator.rules.ForeverLoadRule) DruidServer(org.apache.druid.client.DruidServer) DataSegment(org.apache.druid.timeline.DataSegment) Test(org.junit.Test)
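In contrast to Example 68, the single historical here does not yet serve dataSegment, so the random strategy has a valid target for the one required replica. A minimal sketch of that underlying selection, assuming BalancerStrategy#findNewSegmentHomeReplicator keeps its (segment, serverHolders) signature and that DruidCluster#getHistoricals is available; the original test exercises this only indirectly through the rule runner:

    // Illustrative: with one empty historical in the tier, the random strategy can pick it.
    final List<ServerHolder> holders =
        new ArrayList<>(druidCluster.getHistoricals().get(DruidServer.DEFAULT_TIER));
    final ServerHolder chosen = new RandomBalancerStrategy().findNewSegmentHomeReplicator(dataSegment, holders);
    Assert.assertNotNull(chosen);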

Aggregations

DataSegment (org.apache.druid.timeline.DataSegment): 612 usages
Test (org.junit.Test): 386 usages
ArrayList (java.util.ArrayList): 161 usages
Interval (org.joda.time.Interval): 158 usages
File (java.io.File): 138 usages
Map (java.util.Map): 110 usages
List (java.util.List): 108 usages
ImmutableList (com.google.common.collect.ImmutableList): 77 usages
IOException (java.io.IOException): 77 usages
HashMap (java.util.HashMap): 74 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 72 usages
NumberedShardSpec (org.apache.druid.timeline.partition.NumberedShardSpec): 68 usages
HashSet (java.util.HashSet): 58 usages
TaskStatus (org.apache.druid.indexer.TaskStatus): 53 usages
Collectors (java.util.stream.Collectors): 52 usages
Set (java.util.Set): 50 usages
CountDownLatch (java.util.concurrent.CountDownLatch): 50 usages
ISE (org.apache.druid.java.util.common.ISE): 50 usages
SegmentId (org.apache.druid.timeline.SegmentId): 47 usages
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 45 usages