Use of org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule in project druid by druid-io.
From the class DruidCoordinatorTest, method testComputeUnderReplicationCountsPerDataSourcePerTierForSegmentsWithBroadcastRule:
@Test(timeout = 60_000L)
public void testComputeUnderReplicationCountsPerDataSourcePerTierForSegmentsWithBroadcastRule() throws Exception
{
  final String dataSource = "dataSource";
  final String hotTierName = "hot";
  final String coldTierName = "cold";
  final String tierName1 = "tier1";
  final String tierName2 = "tier2";
  final Rule broadcastDistributionRule = new ForeverBroadcastDistributionRule();
  final String loadPathCold = "/druid/loadqueue/cold:1234";
  final String loadPathBroker1 = "/druid/loadqueue/broker1:1234";
  final String loadPathBroker2 = "/druid/loadqueue/broker2:1234";
  final String loadPathPeon = "/druid/loadqueue/peon:1234";

  // Servers spanning the historical, broker, and indexer-executor types across four tiers.
  final DruidServer hotServer = new DruidServer("hot", "hot", null, 5L, ServerType.HISTORICAL, hotTierName, 0);
  final DruidServer coldServer = new DruidServer("cold", "cold", null, 5L, ServerType.HISTORICAL, coldTierName, 0);
  final DruidServer brokerServer1 = new DruidServer("broker1", "broker1", null, 5L, ServerType.BROKER, tierName1, 0);
  final DruidServer brokerServer2 = new DruidServer("broker2", "broker2", null, 5L, ServerType.BROKER, tierName2, 0);
  final DruidServer peonServer = new DruidServer("peon", "peon", null, 5L, ServerType.INDEXER_EXECUTOR, tierName2, 0);

  final Map<String, DataSegment> dataSegments = ImmutableMap.of(
      "2018-01-02T00:00:00.000Z_2018-01-03T00:00:00.000Z",
      new DataSegment(dataSource, Intervals.of("2018-01-02/P1D"), "v1", null, null, null, null, 0x9, 0),
      "2018-01-03T00:00:00.000Z_2018-01-04T00:00:00.000Z",
      new DataSegment(dataSource, Intervals.of("2018-01-03/P1D"), "v1", null, null, null, null, 0x9, 0),
      "2017-01-01T00:00:00.000Z_2017-01-02T00:00:00.000Z",
      new DataSegment(dataSource, Intervals.of("2017-01-01/P1D"), "v1", null, null, null, null, 0x9, 0)
  );

  final LoadQueuePeon loadQueuePeonCold = new CuratorLoadQueuePeon(
      curator, loadPathCold, objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_cold_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon_cold-%d"),
      druidCoordinatorConfig
  );
  final LoadQueuePeon loadQueuePeonBroker1 = new CuratorLoadQueuePeon(
      curator, loadPathBroker1, objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_broker1_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon_broker1-%d"),
      druidCoordinatorConfig
  );
  final LoadQueuePeon loadQueuePeonBroker2 = new CuratorLoadQueuePeon(
      curator, loadPathBroker2, objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_broker2_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon_broker2-%d"),
      druidCoordinatorConfig
  );
  final LoadQueuePeon loadQueuePeonPeonServer = new CuratorLoadQueuePeon(
      curator, loadPathPeon, objectMapper,
      Execs.scheduledSingleThreaded("coordinator_test_load_queue_peon_peon_scheduled-%d"),
      Execs.singleThreaded("coordinator_test_load_queue_peon_peon-%d"),
      druidCoordinatorConfig
  );

  final PathChildrenCache pathChildrenCacheCold = new PathChildrenCache(
      curator, loadPathCold, true, true,
      Execs.singleThreaded("coordinator_test_path_children_cache_cold-%d")
  );
  final PathChildrenCache pathChildrenCacheBroker1 = new PathChildrenCache(
      curator, loadPathBroker1, true, true,
      Execs.singleThreaded("coordinator_test_path_children_cache_broker1-%d")
  );
  final PathChildrenCache pathChildrenCacheBroker2 = new PathChildrenCache(
      curator, loadPathBroker2, true, true,
      Execs.singleThreaded("coordinator_test_path_children_cache_broker2-%d")
  );
  final PathChildrenCache pathChildrenCachePeon = new PathChildrenCache(
      curator, loadPathPeon, true, true,
      Execs.singleThreaded("coordinator_test_path_children_cache_peon-%d")
  );

  loadManagementPeons.putAll(ImmutableMap.of(
      "hot", loadQueuePeon,
      "cold", loadQueuePeonCold,
      "broker1", loadQueuePeonBroker1,
      "broker2", loadQueuePeonBroker2,
      "peon", loadQueuePeonPeonServer
  ));

  loadQueuePeonCold.start();
  loadQueuePeonBroker1.start();
  loadQueuePeonBroker2.start();
  loadQueuePeonPeonServer.start();
  pathChildrenCache.start();
  pathChildrenCacheCold.start();
  pathChildrenCacheBroker1.start();
  pathChildrenCacheBroker2.start();
  pathChildrenCachePeon.start();

  DruidDataSource[] druidDataSources = {new DruidDataSource(dataSource, Collections.emptyMap())};
  dataSegments.values().forEach(druidDataSources[0]::addSegment);
  setupSegmentsMetadataMock(druidDataSources[0]);

  EasyMock.expect(metadataRuleManager.getRulesWithDefault(EasyMock.anyString()))
          .andReturn(ImmutableList.of(broadcastDistributionRule))
          .atLeastOnce();
  EasyMock.expect(metadataRuleManager.getAllRules())
          .andReturn(ImmutableMap.of(dataSource, ImmutableList.of(broadcastDistributionRule)))
          .atLeastOnce();
  EasyMock.expect(serverInventoryView.getInventory())
          .andReturn(ImmutableList.of(hotServer, coldServer, brokerServer1, brokerServer2, peonServer))
          .atLeastOnce();
  EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
  EasyMock.replay(metadataRuleManager, serverInventoryView);

  coordinator.start();
  // Wait for this coordinator to become leader
  leaderAnnouncerLatch.await();

  // Wait until all three segments have been assigned to every server.
  final CountDownLatch assignSegmentLatchHot = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCache, dataSegments, hotServer);
  final CountDownLatch assignSegmentLatchCold = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCacheCold, dataSegments, coldServer);
  final CountDownLatch assignSegmentLatchBroker1 = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCacheBroker1, dataSegments, brokerServer1);
  final CountDownLatch assignSegmentLatchBroker2 = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCacheBroker2, dataSegments, brokerServer2);
  final CountDownLatch assignSegmentLatchPeon = createCountDownLatchAndSetPathChildrenCacheListenerWithLatch(3, pathChildrenCachePeon, dataSegments, peonServer);
  assignSegmentLatchHot.await();
  assignSegmentLatchCold.await();
  assignSegmentLatchBroker1.await();
  assignSegmentLatchBroker2.await();
  assignSegmentLatchPeon.await();

  // Let the coordinator complete two full runs before checking replication status.
  final CountDownLatch coordinatorRunLatch = new CountDownLatch(2);
  serviceEmitter.latch = coordinatorRunLatch;
  coordinatorRunLatch.await();

  Assert.assertEquals(ImmutableMap.of(dataSource, 100.0), coordinator.getLoadStatus());

  // With a broadcast rule in effect, no tier should report under-replicated segments.
  Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTier =
      coordinator.computeUnderReplicationCountsPerDataSourcePerTier();
  Assert.assertEquals(4, underReplicationCountsPerDataSourcePerTier.size());
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(hotTierName).getLong(dataSource));
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(coldTierName).getLong(dataSource));
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(tierName1).getLong(dataSource));
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTier.get(tierName2).getLong(dataSource));

  Map<String, Object2LongMap<String>> underReplicationCountsPerDataSourcePerTierUsingClusterView =
      coordinator.computeUnderReplicationCountsPerDataSourcePerTierUsingClusterView();
  Assert.assertEquals(4, underReplicationCountsPerDataSourcePerTierUsingClusterView.size());
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(hotTierName).getLong(dataSource));
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(coldTierName).getLong(dataSource));
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(tierName1).getLong(dataSource));
  Assert.assertEquals(0L, underReplicationCountsPerDataSourcePerTierUsingClusterView.get(tierName2).getLong(dataSource));

  coordinator.stop();
  leaderUnannouncerLatch.await();

  EasyMock.verify(serverInventoryView);
  EasyMock.verify(segmentsMetadataManager);
  EasyMock.verify(metadataRuleManager);
}
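
The rule under test takes no parameters: a ForeverBroadcastDistributionRule applies to every segment at every reference time, which is why all four tiers report zero under-replication once the coordinator has run. A minimal standalone sketch of that behavior follows; the interval and timestamp values are arbitrary illustrations, not taken from the test.

import org.apache.druid.java.util.common.DateTimes;
import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule;
import org.apache.druid.server.coordinator.rules.Rule;

public class BroadcastRuleSketch
{
  public static void main(String[] args)
  {
    final Rule rule = new ForeverBroadcastDistributionRule();
    // "Forever" rules ignore both the interval and the reference timestamp,
    // so appliesTo is true for any segment the coordinator considers.
    System.out.println(rule.appliesTo(Intervals.of("2018-01-02/P1D"), DateTimes.nowUtc())); // true
    System.out.println(rule.appliesTo(Intervals.of("1000/3000"), DateTimes.nowUtc()));      // true
  }
}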
Use of org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule in project druid by druid-io.
From the class ITBroadcastJoinQueryTest, method testBroadcastJoin:
@Test
public void testBroadcastJoin() throws Exception
{
  final Closer closer = Closer.create();
  try {
    closer.register(unloader(BROADCAST_JOIN_DATASOURCE));
    closer.register(() -> {
      // remove the broadcast rule on cleanup
      try {
        coordinatorClient.postLoadRules(BROADCAST_JOIN_DATASOURCE, ImmutableList.of());
      }
      catch (Exception ignored) {
      }
    });

    // prepare for broadcast by adding a forever-broadcast load rule
    coordinatorClient.postLoadRules(
        BROADCAST_JOIN_DATASOURCE,
        ImmutableList.of(new ForeverBroadcastDistributionRule())
    );

    // load the data
    String taskJson = replaceJoinTemplate(getResourceAsString(BROADCAST_JOIN_TASK), BROADCAST_JOIN_DATASOURCE);
    indexer.submitTask(taskJson);
    dataLoaderHelper.waitUntilDatasourceIsReady(BROADCAST_JOIN_DATASOURCE);

    // query metadata until the druid schema is refreshed and the datasource is available and joinable
    ITRetryUtil.retryUntilTrue(
        () -> {
          try {
            queryHelper.testQueriesFromString(
                queryHelper.getQueryURL(config.getRouterUrl()),
                replaceJoinTemplate(getResourceAsString(BROADCAST_JOIN_METADATA_QUERIES_RESOURCE), BROADCAST_JOIN_DATASOURCE)
            );
            return true;
          }
          catch (Exception ex) {
            LOG.error(ex, "SQL metadata not yet in expected state");
            return false;
          }
        },
        "waiting for SQL metadata refresh"
    );

    // now run the broadcast join queries
    queryHelper.testQueriesFromString(
        queryHelper.getQueryURL(config.getRouterUrl()),
        replaceJoinTemplate(getResourceAsString(BROADCAST_JOIN_QUERIES_RESOURCE), BROADCAST_JOIN_DATASOURCE)
    );
  }
  finally {
    closer.close();

    // query metadata until the druid schema is refreshed and the datasource is no longer available
    ITRetryUtil.retryUntilTrue(
        () -> {
          try {
            queryHelper.testQueriesFromString(
                queryHelper.getQueryURL(config.getRouterUrl()),
                replaceJoinTemplate(getResourceAsString(BROADCAST_JOIN_METADATA_QUERIES_AFTER_DROP_RESOURCE), BROADCAST_JOIN_DATASOURCE)
            );
            return true;
          }
          catch (Exception ex) {
            LOG.error(ex, "SQL metadata not yet in expected state");
            return false;
          }
        },
        "waiting for SQL metadata refresh"
    );
  }
}
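
The coordinatorClient.postLoadRules call above is the integration-test wrapper around the Coordinator's rules endpoint. Below is a minimal sketch of the same set-then-clear sequence over plain HTTP, assuming a Coordinator at localhost:8081 and a hypothetical datasource name; the endpoint path follows the documented Coordinator API, and "broadcastForever" is the JSON type name registered for this rule.

import java.net.URI;
import java.net.http.HttpClient;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;

public class PostBroadcastRuleSketch
{
  public static void main(String[] args) throws Exception
  {
    final String coordinator = "http://localhost:8081";    // assumed Coordinator location
    final String dataSource = "broadcast_join_datasource"; // hypothetical datasource name
    final HttpClient client = HttpClient.newHttpClient();

    // Install a forever-broadcast rule, mirroring postLoadRules(..., new ForeverBroadcastDistributionRule()).
    post(client, coordinator + "/druid/coordinator/v1/rules/" + dataSource, "[{\"type\":\"broadcastForever\"}]");

    // Clear the rules by posting an empty list, mirroring the cleanup closure in the test.
    post(client, coordinator + "/druid/coordinator/v1/rules/" + dataSource, "[]");
  }

  private static void post(HttpClient client, String url, String body) throws Exception
  {
    final HttpRequest request = HttpRequest.newBuilder()
        .uri(URI.create(url))
        .header("Content-Type", "application/json")
        .POST(HttpRequest.BodyPublishers.ofString(body))
        .build();
    System.out.println(client.send(request, HttpResponse.BodyHandlers.ofString()).statusCode());
  }
}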
Use of org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule in project druid by druid-io.
From the class UnloadUnusedSegmentsTest, method mockRuleManager:
private static void mockRuleManager(MetadataRuleManager metadataRuleManager)
{
  // Regular datasources get a forever-load rule with one replica in each of two tiers...
  EasyMock.expect(metadataRuleManager.getRulesWithDefault("datasource1"))
          .andReturn(Collections.singletonList(
              new ForeverLoadRule(ImmutableMap.of(DruidServer.DEFAULT_TIER, 1, "tier2", 1))
          ))
          .anyTimes();
  EasyMock.expect(metadataRuleManager.getRulesWithDefault("datasource2"))
          .andReturn(Collections.singletonList(
              new ForeverLoadRule(ImmutableMap.of(DruidServer.DEFAULT_TIER, 1, "tier2", 1))
          ))
          .anyTimes();
  // ...while the broadcast datasource gets a forever-broadcast rule instead.
  EasyMock.expect(metadataRuleManager.getRulesWithDefault("broadcastDatasource"))
          .andReturn(Collections.singletonList(new ForeverBroadcastDistributionRule()))
          .anyTimes();
  EasyMock.replay(metadataRuleManager);
}
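
For contrast with the mock above: ForeverLoadRule carries a tier-to-replicant map, while ForeverBroadcastDistributionRule takes no arguments, since a broadcast segment is loaded by every server regardless of tier. A small sketch of their serialized forms, assuming Druid's DefaultObjectMapper picks up the Rule subtype annotations; the JSON shapes shown in the comments are assumptions about the registered field names.

import com.google.common.collect.ImmutableMap;
import org.apache.druid.client.DruidServer;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.server.coordinator.rules.ForeverBroadcastDistributionRule;
import org.apache.druid.server.coordinator.rules.ForeverLoadRule;

public class RuleJsonSketch
{
  public static void main(String[] args) throws Exception
  {
    final DefaultObjectMapper mapper = new DefaultObjectMapper();
    // Expected shape (assumed): {"type":"loadForever","tieredReplicants":{"_default_tier":1,"tier2":1}}
    System.out.println(mapper.writeValueAsString(
        new ForeverLoadRule(ImmutableMap.of(DruidServer.DEFAULT_TIER, 1, "tier2", 1))
    ));
    // Expected shape (assumed): {"type":"broadcastForever"}; no replicant map needed.
    System.out.println(mapper.writeValueAsString(new ForeverBroadcastDistributionRule()));
  }
}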