use of org.apache.druid.client.DruidServer in project druid by druid-io.
the class RunRulesTest method testRunTwoTiersTwoReplicants.
/**
* Nodes:
* hot - 2 replicants
* cold - 1 replicant
*/
@Test
public void testRunTwoTiersTwoReplicants() {
  mockCoordinator();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  mockEmptyPeon();

  // The first matching rule wins: segments in the first six hours of 2012-01-01 load onto
  // "hot" with 2 replicants, the rest of the day loads onto "cold" with 1 replicant.
  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject())).andReturn(
      Lists.newArrayList(
          new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T06:00:00.000Z"), ImmutableMap.of("hot", 2)),
          new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"), ImmutableMap.of("cold", 1))
      )
  ).atLeastOnce();
  EasyMock.replay(databaseRuleManager);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier(
          "hot",
          new ServerHolder(new DruidServer("serverHot", "hostHot", null, 1000, ServerType.HISTORICAL, "hot", 0).toImmutableDruidServer(), mockPeon),
          new ServerHolder(new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 0).toImmutableDruidServer(), mockPeon)
      )
      .addTier(
          "cold",
          new ServerHolder(new DruidServer("serverCold", "hostCold", null, 1000, ServerType.HISTORICAL, "cold", 0).toImmutableDruidServer(), mockPeon)
      )
      .build();

  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
  DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, balancerStrategy).build();
  DruidCoordinatorRuntimeParams afterParams = ruleRunner.run(params);
  CoordinatorStats stats = afterParams.getCoordinatorStats();

  // 6 one-hour segments x 2 replicants on "hot"; the remaining 18 segments x 1 replicant on "cold".
  Assert.assertEquals(12L, stats.getTieredStat("assignedCount", "hot"));
  Assert.assertEquals(18L, stats.getTieredStat("assignedCount", "cold"));
  Assert.assertTrue(stats.getTiers("unassignedCount").isEmpty());
  Assert.assertTrue(stats.getTiers("unassignedSize").isEmpty());

  exec.shutdown();
  EasyMock.verify(mockPeon);
}
use of org.apache.druid.client.DruidServer in project druid by druid-io.
the class RunRulesTest method testRunTwoTiersTierDoesNotExist.
@Test
public void testRunTwoTiersTierDoesNotExist() {
  mockCoordinator();
  mockPeon.loadSegment(EasyMock.anyObject(), EasyMock.anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  mockEmptyPeon();

  // The "hot" tier named by the first rule has no servers in the cluster,
  // so the coordinator emits one alert per segment covered by that rule.
  emitter.emit(EasyMock.<ServiceEventBuilder>anyObject());
  EasyMock.expectLastCall().times(12);
  EasyMock.replay(emitter);

  EasyMock.expect(databaseRuleManager.getRulesWithDefault(EasyMock.anyObject())).andReturn(
      Lists.newArrayList(
          new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-01T12:00:00.000Z"), ImmutableMap.of("hot", 1)),
          new IntervalLoadRule(Intervals.of("2012-01-01T00:00:00.000Z/2012-01-02T00:00:00.000Z"), ImmutableMap.of("normal", 1))
      )
  ).atLeastOnce();
  EasyMock.replay(databaseRuleManager);

  DruidCluster druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("normal", new ServerHolder(new DruidServer("serverNorm", "hostNorm", null, 1000, ServerType.HISTORICAL, "normal", 0).toImmutableDruidServer(), mockPeon))
      .build();

  ListeningExecutorService exec = MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(1));
  BalancerStrategy balancerStrategy = new CostBalancerStrategyFactory().createBalancerStrategy(exec);
  DruidCoordinatorRuntimeParams params = makeCoordinatorRuntimeParams(druidCluster, balancerStrategy).withEmitter(emitter).build();
  ruleRunner.run(params);

  exec.shutdown();
  EasyMock.verify(emitter);
  EasyMock.verify(mockPeon);
}
use of org.apache.druid.client.DruidServer in project druid by druid-io.
the class BroadcastDistributionRuleTest method setUp.
@Before
public void setUp() {
  smallSegment = new DataSegment("small_source", Intervals.of("0/1000"), DateTimes.nowUtc().toString(),
      new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 0);
  for (int i = 0; i < 3; i++) {
    largeSegments.add(new DataSegment("large_source", Intervals.of((i * 1000) + "/" + ((i + 1) * 1000)), DateTimes.nowUtc().toString(),
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 100));
  }
  for (int i = 0; i < 2; i++) {
    largeSegments2.add(new DataSegment("large_source2", Intervals.of((i * 1000) + "/" + ((i + 1) * 1000)), DateTimes.nowUtc().toString(),
        new HashMap<>(), new ArrayList<>(), new ArrayList<>(), NoneShardSpec.instance(), 0, 100));
  }
  holderOfSmallSegment = new ServerHolder(
      new DruidServer("serverHot2", "hostHot2", null, 1000, ServerType.HISTORICAL, "hot", 0).addDataSegment(smallSegment).toImmutableDruidServer(), new LoadQueuePeonTester());
  holdersOfLargeSegments.add(new ServerHolder(
      new DruidServer("serverHot1", "hostHot1", null, 1000, ServerType.HISTORICAL, "hot", 0).addDataSegment(largeSegments.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester()));
  holdersOfLargeSegments.add(new ServerHolder(
      new DruidServer("serverNorm1", "hostNorm1", null, 1000, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(largeSegments.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester()));
  holdersOfLargeSegments.add(new ServerHolder(
      new DruidServer("serverNorm2", "hostNorm2", null, 100, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(largeSegments.get(2)).toImmutableDruidServer(), new LoadQueuePeonTester()));
  holdersOfLargeSegments2.add(new ServerHolder(
      new DruidServer("serverHot3", "hostHot3", null, 1000, ServerType.HISTORICAL, "hot", 0).addDataSegment(largeSegments2.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester()));
  holdersOfLargeSegments2.add(new ServerHolder(
      new DruidServer("serverNorm3", "hostNorm3", null, 100, ServerType.HISTORICAL, DruidServer.DEFAULT_TIER, 0).addDataSegment(largeSegments2.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester()));
  activeServer = new ServerHolder(
      new DruidServer("active", "host1", null, 100, ServerType.HISTORICAL, "tier1", 0).addDataSegment(largeSegments.get(0)).toImmutableDruidServer(), new LoadQueuePeonTester());
  // The trailing "true" marks these two holders as decommissioning servers.
  decommissioningServer1 = new ServerHolder(
      new DruidServer("decommissioning1", "host2", null, 100, ServerType.HISTORICAL, "tier1", 0).addDataSegment(smallSegment).toImmutableDruidServer(), new LoadQueuePeonTester(), true);
  decommissioningServer2 = new ServerHolder(
      new DruidServer("decommissioning2", "host3", null, 100, ServerType.HISTORICAL, "tier1", 0).addDataSegment(largeSegments.get(1)).toImmutableDruidServer(), new LoadQueuePeonTester(), true);
  druidCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("hot", holdersOfLargeSegments.get(0), holderOfSmallSegment, holdersOfLargeSegments2.get(0))
      .addTier(DruidServer.DEFAULT_TIER, holdersOfLargeSegments.get(1), holdersOfLargeSegments.get(2), holdersOfLargeSegments2.get(1))
      .build();
  secondCluster = DruidClusterBuilder
      .newBuilder()
      .addTier("tier1", activeServer, decommissioningServer1, decommissioningServer2)
      .build();
}
use of org.apache.druid.client.DruidServer in project druid by druid-io.
the class IntervalsResourceTest method setUp.
@Before
public void setUp() {
  inventoryView = EasyMock.createStrictMock(InventoryView.class);
  server = EasyMock.createStrictMock(DruidServer.class);
  request = EasyMock.createStrictMock(HttpServletRequest.class);

  dataSegmentList = new ArrayList<>();
  dataSegmentList.add(new DataSegment("datasource1", Intervals.of("2010-01-01T00:00:00.000Z/P1D"), "", null, null, null, null, 0x9, 20));
  dataSegmentList.add(new DataSegment("datasource1", Intervals.of("2010-01-22T00:00:00.000Z/P1D"), "", null, null, null, null, 0x9, 10));
  dataSegmentList.add(new DataSegment("datasource2", Intervals.of("2010-01-01T00:00:00.000Z/P1D"), "", null, null, null, null, 0x9, 5));

  // The strict mock assigned above is immediately replaced by a real DruidServer holding the three segments.
  server = new DruidServer("who", "host", null, 1234, ServerType.HISTORICAL, "tier1", 0);
  server.addDataSegment(dataSegmentList.get(0));
  server.addDataSegment(dataSegmentList.get(1));
  server.addDataSegment(dataSegmentList.get(2));
}
use of org.apache.druid.client.DruidServer in project druid by druid-io.
the class SystemSchemaTest method mockDataServer.
private DruidServer mockDataServer(String name, long currentSize, long maxSize, String tier) {
  final DruidServer server = EasyMock.createMock(DruidServer.class);
  EasyMock.expect(serverInventoryView.getInventoryValue(name)).andReturn(server).once();
  EasyMock.expect(server.getCurrSize()).andReturn(currentSize).once();
  EasyMock.expect(server.getMaxSize()).andReturn(maxSize).once();
  EasyMock.expect(server.getTier()).andReturn(tier).once();
  return server;
}
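For context, here is a minimal sketch of how a helper like this might be exercised, assuming the surrounding test replays and verifies its own mocks. The server name, sizes, tier, and assertions below are hypothetical and are not taken from SystemSchemaTest.

// Hypothetical usage of the mockDataServer helper; all values are illustrative.
DruidServer historical = mockDataServer("historical-1", 500L, 1000L, "hot");
EasyMock.replay(serverInventoryView, historical);

// The inventory view resolves the name to the mocked server, and each stubbed
// getter (current size, max size, tier) is expected to be called exactly once.
DruidServer resolved = serverInventoryView.getInventoryValue("historical-1");
Assert.assertEquals(500L, resolved.getCurrSize());
Assert.assertEquals(1000L, resolved.getMaxSize());
Assert.assertEquals("hot", resolved.getTier());

EasyMock.verify(serverInventoryView, historical);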