Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From the class DruidCoordinatorTest, method testMoveSegment:
@Test
public void testMoveSegment() {
  final DataSegment segment = EasyMock.createNiceMock(DataSegment.class);
  EasyMock.expect(segment.getId()).andReturn(SegmentId.dummy("dummySegment"));
  EasyMock.expect(segment.getDataSource()).andReturn("dummyDataSource");
  EasyMock.replay(segment);

  // Load queue peon shared by the "from" and "to" servers; expects one load, one drop,
  // and the mark/unmark-to-drop bookkeeping around the move.
  loadQueuePeon = EasyMock.createNiceMock(LoadQueuePeon.class);
  EasyMock.expect(loadQueuePeon.getLoadQueueSize()).andReturn(new Long(1));
  loadQueuePeon.markSegmentToDrop(segment);
  EasyMock.expectLastCall().once();

  Capture<LoadPeonCallback> loadCallbackCapture = Capture.newInstance();
  Capture<LoadPeonCallback> dropCallbackCapture = Capture.newInstance();
  loadQueuePeon.loadSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(loadCallbackCapture));
  EasyMock.expectLastCall().once();
  loadQueuePeon.dropSegment(EasyMock.anyObject(DataSegment.class), EasyMock.capture(dropCallbackCapture));
  EasyMock.expectLastCall().once();
  loadQueuePeon.unmarkSegmentToDrop(segment);
  EasyMock.expectLastCall().once();
  EasyMock.expect(loadQueuePeon.getSegmentsToDrop()).andReturn(new HashSet<>()).once();
  EasyMock.replay(loadQueuePeon);

  ImmutableDruidDataSource druidDataSource = EasyMock.createNiceMock(ImmutableDruidDataSource.class);
  EasyMock.expect(druidDataSource.getSegment(EasyMock.anyObject(SegmentId.class))).andReturn(segment);
  EasyMock.replay(druidDataSource);
  EasyMock.expect(segmentsMetadataManager.getImmutableDataSourceWithUsedSegments(EasyMock.anyString()))
          .andReturn(druidDataSource);
  EasyMock.replay(segmentsMetadataManager);
  EasyMock.expect(dataSourcesSnapshot.getDataSource(EasyMock.anyString())).andReturn(druidDataSource).anyTimes();
  EasyMock.replay(dataSourcesSnapshot);

  scheduledExecutorFactory = EasyMock.createNiceMock(ScheduledExecutorFactory.class);
  EasyMock.replay(scheduledExecutorFactory);
  EasyMock.replay(metadataRuleManager);

  ImmutableDruidDataSource dataSource = EasyMock.createMock(ImmutableDruidDataSource.class);
  EasyMock.expect(dataSource.getSegments()).andReturn(Collections.singletonList(segment)).anyTimes();
  EasyMock.replay(dataSource);

  // Source and destination servers are identified by their DruidServerMetadata ("from" / "to").
  EasyMock.expect(druidServer.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("from", null, null, 5L, ServerType.HISTORICAL, null, 0),
          1L, ImmutableMap.of("dummyDataSource", dataSource), 1
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer);

  DruidServer druidServer2 = EasyMock.createMock(DruidServer.class);
  EasyMock.expect(druidServer2.toImmutableDruidServer()).andReturn(
      new ImmutableDruidServer(
          new DruidServerMetadata("to", null, null, 5L, ServerType.HISTORICAL, null, 0),
          1L, ImmutableMap.of("dummyDataSource", dataSource), 1
      )
  ).atLeastOnce();
  EasyMock.replay(druidServer2);

  loadManagementPeons.put("from", loadQueuePeon);
  loadManagementPeons.put("to", loadQueuePeon);

  EasyMock.expect(serverInventoryView.isSegmentLoadedByServer("to", segment)).andReturn(true).once();
  EasyMock.replay(serverInventoryView);

  mockCoordinatorRuntimeParams();

  coordinator.moveSegment(
      coordinatorRuntimeParams,
      druidServer.toImmutableDruidServer(),
      druidServer2.toImmutableDruidServer(),
      segment,
      null
  );

  LoadPeonCallback loadCallback = loadCallbackCapture.getValue();
  loadCallback.execute();
  LoadPeonCallback dropCallback = dropCallbackCapture.getValue();
  dropCallback.execute();

  EasyMock.verify(druidServer, druidServer2, loadQueuePeon, serverInventoryView, metadataRuleManager);
  EasyMock.verify(coordinatorRuntimeParams);
}
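For reference, the DruidServerMetadata arguments seen above for the "from" and "to" servers follow the order name, host-and-port, TLS host-and-port, maximum segment size in bytes, server type, tier, and priority, as inferred from these snippets. A minimal sketch of building metadata for a hypothetical historical node (all values are illustrative, not taken from the test):

// Hypothetical historical node; argument order inferred from the usages above:
// name, hostAndPort, hostAndTlsPort, maxSize, type, tier, priority.
DruidServerMetadata historicalMetadata = new DruidServerMetadata(
    "historical-1",
    "localhost:8083",
    null,
    10L * 1024 * 1024 * 1024,
    ServerType.HISTORICAL,
    "_default_tier",
    0
);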
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From the class CuratorDruidCoordinatorTest, method setupView:
private void setupView() throws Exception {
  baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test") {
    @Override
    public void registerSegmentCallback(Executor exec, final SegmentCallback callback) {
      super.registerSegmentCallback(exec, new SegmentCallback() {
        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
          CallbackAction res = callback.segmentAdded(server, segment);
          segmentAddedLatch.countDown();
          return res;
        }

        @Override
        public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
          CallbackAction res = callback.segmentRemoved(server, segment);
          segmentRemovedLatch.countDown();
          return res;
        }

        @Override
        public CallbackAction segmentViewInitialized() {
          CallbackAction res = callback.segmentViewInitialized();
          segmentViewInitLatch.countDown();
          return res;
        }
      });
    }
  };

  serverView = new CoordinatorServerView(baseView, new CoordinatorSegmentWatcherConfig());

  baseView.start();
  sourceLoadQueuePeon.start();
  destinationLoadQueuePeon.start();

  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager, segmentsMetadataManager, baseView, metadataRuleManager,
      () -> curator, new NoopServiceEmitter(), scheduledExecutorFactory, null, null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode, loadManagementPeons, null, null,
      new CoordinatorCustomDutyGroups(ImmutableSet.of()),
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(), null, ZkEnablementConfig.ENABLED
  );
}
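Because the wrapped SegmentCallback above only counts down latches, a test built on this setupView would typically block on those latches before asserting anything about segment movement. A minimal sketch, assuming JUnit's Assert and a 60-second timeout (the timeout value is illustrative):

// Wait until the inventory view has initialized and the expected segmentAdded
// callbacks have fired before the test proceeds to its assertions.
Assert.assertTrue(segmentViewInitLatch.await(60, TimeUnit.SECONDS));
Assert.assertTrue(segmentAddedLatch.await(60, TimeUnit.SECONDS));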
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From the class BatchServerInventoryViewTest, method setUp:
@Before
public void setUp() throws Exception {
  testingCluster = new TestingCluster(1);
  testingCluster.start();

  cf = CuratorFrameworkFactory.builder()
                              .connectString(testingCluster.getConnectString())
                              .retryPolicy(new ExponentialBackoffRetry(1, 10))
                              .compressionProvider(new PotentiallyGzippedCompressionProvider(true))
                              .build();
  cf.start();
  cf.blockUntilConnected();
  cf.create().creatingParentsIfNeeded().forPath(TEST_BASE_PATH);

  jsonMapper = TestHelper.makeJsonMapper();

  announcer = new Announcer(cf, Execs.directExecutor());
  announcer.start();

  DruidServerMetadata serverMetadata = new DruidServerMetadata(
      "id", "host", null, Long.MAX_VALUE, ServerType.HISTORICAL, "tier", 0
  );

  ZkPathsConfig zkPathsConfig = new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  };

  serverAnnouncer = new CuratorDataSegmentServerAnnouncer(serverMetadata, zkPathsConfig, announcer, jsonMapper);
  serverAnnouncer.announce();

  segmentAnnouncer = new BatchDataSegmentAnnouncer(serverMetadata, new BatchDataSegmentAnnouncerConfig() {
    @Override
    public int getSegmentsPerNode() {
      return 50;
    }
  }, zkPathsConfig, announcer, jsonMapper);

  testSegments = Sets.newConcurrentHashSet();
  for (int i = 0; i < INITIAL_SEGMENTS; i++) {
    testSegments.add(makeSegment(i));
  }

  batchServerInventoryView = new BatchServerInventoryView(new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  }, cf, jsonMapper, Predicates.alwaysTrue(), "test");
  batchServerInventoryView.start();

  inventoryUpdateCounter.set(0);

  filteredBatchServerInventoryView = new BatchServerInventoryView(new ZkPathsConfig() {
    @Override
    public String getBase() {
      return TEST_BASE_PATH;
    }
  }, cf, jsonMapper, new Predicate<Pair<DruidServerMetadata, DataSegment>>() {
    @Override
    public boolean apply(@Nullable Pair<DruidServerMetadata, DataSegment> input) {
      return input.rhs.getInterval().getStart().isBefore(SEGMENT_INTERVAL_START.plusDays(INITIAL_SEGMENTS));
    }
  }, "test") {
    @Override
    protected DruidServer addInnerInventory(DruidServer container, String inventoryKey, Set<DataSegment> inventory) {
      DruidServer server = super.addInnerInventory(container, inventoryKey, inventory);
      inventoryUpdateCounter.incrementAndGet();
      return server;
    }
  };
  filteredBatchServerInventoryView.start();
}
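With this setUp in place, a test body would typically announce additional segments through segmentAnnouncer and check that the filtered view only reflects segments whose interval starts before the cutoff encoded in the predicate above. A rough sketch using the test's own makeSegment helper (the index value is illustrative):

// Announce one more segment; the unfiltered view should eventually see it,
// while the filtered view admits it only if its interval starts before the cutoff.
DataSegment extraSegment = makeSegment(INITIAL_SEGMENTS + 1);
segmentAnnouncer.announceSegment(extraSegment);
testSegments.add(extraSegment);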
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From the class TierSelectorStrategyTest, method testTierSelectorStrategy:
private void testTierSelectorStrategy(TierSelectorStrategy tierSelectorStrategy, QueryableDruidServer... expectedSelection) {
  final ServerSelector serverSelector = new ServerSelector(
      new DataSegment(
          "test",
          Intervals.of("2013-01-01/2013-01-02"),
          DateTimes.of("2013-01-01").toString(),
          new HashMap<>(), new ArrayList<>(), new ArrayList<>(),
          NoneShardSpec.instance(), 0, 0L
      ),
      tierSelectorStrategy
  );

  List<QueryableDruidServer> servers = new ArrayList<>(Arrays.asList(expectedSelection));
  List<DruidServerMetadata> expectedCandidates = new ArrayList<>();
  for (QueryableDruidServer server : servers) {
    expectedCandidates.add(server.getServer().getMetadata());
  }
  Collections.shuffle(servers);
  for (QueryableDruidServer server : servers) {
    serverSelector.addServerAndUpdateSegment(server, serverSelector.getSegment());
  }

  Assert.assertEquals(expectedSelection[0], serverSelector.pick(null));
  Assert.assertEquals(expectedSelection[0], serverSelector.pick(EasyMock.createMock(Query.class)));
  Assert.assertEquals(expectedCandidates, serverSelector.getCandidates(-1));
  Assert.assertEquals(expectedCandidates.subList(0, 2), serverSelector.getCandidates(2));
}
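A hypothetical invocation of this helper, assuming highPriorityServer and lowPriorityServer are QueryableDruidServer fixtures (not shown here) whose DruidServerMetadata differ only in priority; the expected selection is listed best-first, so the higher-priority server comes first:

// Sketch only: with HighestPriorityTierSelectorStrategy the higher-priority
// server should be picked first and lead the candidate list.
testTierSelectorStrategy(
    new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()),
    highPriorityServer,
    lowPriorityServer
);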
Use of org.apache.druid.server.coordination.DruidServerMetadata in project druid by druid-io.
From the class DatasourceOptimizerTest, method setupViews:
private void setupViews() throws Exception {
  baseView = new BatchServerInventoryView(zkPathsConfig, curator, jsonMapper, Predicates.alwaysTrue(), "test") {
    @Override
    public void registerSegmentCallback(Executor exec, final SegmentCallback callback) {
      super.registerSegmentCallback(exec, new SegmentCallback() {
        @Override
        public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
          return callback.segmentAdded(server, segment);
        }

        @Override
        public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
          return callback.segmentRemoved(server, segment);
        }

        @Override
        public CallbackAction segmentViewInitialized() {
          return callback.segmentViewInitialized();
        }
      });
    }
  };

  brokerServerView = new BrokerServerView(
      EasyMock.createMock(QueryToolChestWarehouse.class),
      EasyMock.createMock(QueryWatcher.class),
      getSmileMapper(),
      EasyMock.createMock(HttpClient.class),
      baseView,
      new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()),
      new NoopServiceEmitter(),
      new BrokerSegmentWatcherConfig()
  );

  baseView.start();
}
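The pass-through SegmentCallback above hands each event the announcing server's DruidServerMetadata. A minimal sketch of a callback that inspects that metadata before continuing; this assumes the getName() and getTier() accessors on DruidServerMetadata and the CallbackAction.CONTINUE constant, and the logging is purely illustrative:

// Illustrative callback: reads a couple of DruidServerMetadata fields on each add.
SegmentCallback loggingCallback = new SegmentCallback() {
  @Override
  public CallbackAction segmentAdded(DruidServerMetadata server, DataSegment segment) {
    System.out.println("segment " + segment.getId() + " added on " + server.getName()
        + " (tier " + server.getTier() + ")");
    return CallbackAction.CONTINUE;
  }

  @Override
  public CallbackAction segmentRemoved(DruidServerMetadata server, DataSegment segment) {
    return CallbackAction.CONTINUE;
  }

  @Override
  public CallbackAction segmentViewInitialized() {
    return CallbackAction.CONTINUE;
  }
};
baseView.registerSegmentCallback(Execs.directExecutor(), loggingCallback);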