Use of org.apache.druid.client.DruidDataSource in project druid by druid-io.
From the class DruidCoordinatorTest, method testCoordinatorCustomDutyGroupsRunAsExpected.
@Test(timeout = 3000)
public void testCoordinatorCustomDutyGroupsRunAsExpected() throws Exception {
  // Some necessary setup to start the Coordinator
  JacksonConfigManager configManager = EasyMock.createNiceMock(JacksonConfigManager.class);
  EasyMock.expect(configManager.watch(
      EasyMock.eq(CoordinatorDynamicConfig.CONFIG_KEY),
      EasyMock.anyObject(Class.class),
      EasyMock.anyObject()
  )).andReturn(new AtomicReference<>(CoordinatorDynamicConfig.builder().build())).anyTimes();
  EasyMock.expect(configManager.watch(
      EasyMock.eq(CoordinatorCompactionConfig.CONFIG_KEY),
      EasyMock.anyObject(Class.class),
      EasyMock.anyObject()
  )).andReturn(new AtomicReference<>(CoordinatorCompactionConfig.empty())).anyTimes();
  EasyMock.replay(configManager);

  EasyMock.expect(segmentsMetadataManager.isPollingDatabasePeriodically()).andReturn(true).anyTimes();
  DruidDataSource dataSource = new DruidDataSource("dataSource1", Collections.emptyMap());
  DataSegment dataSegment =
      new DataSegment("dataSource1", Intervals.of("2010-01-01/P1D"), "v1", null, null, null, null, 0x9, 0);
  dataSource.addSegment(dataSegment);
  DataSourcesSnapshot dataSourcesSnapshot =
      new DataSourcesSnapshot(ImmutableMap.of(dataSource.getName(), dataSource.toImmutableDruidDataSource()));
  EasyMock.expect(segmentsMetadataManager.getSnapshotOfDataSourcesWithAllUsedSegments())
          .andReturn(dataSourcesSnapshot)
          .anyTimes();
  EasyMock.replay(segmentsMetadataManager);
  EasyMock.expect(serverInventoryView.isStarted()).andReturn(true).anyTimes();
  EasyMock.replay(serverInventoryView);

  // Create two CoordinatorCustomDutyGroups, each holding one duty that counts
  // down a latch when it runs.
  CountDownLatch latch1 = new CountDownLatch(1);
  CoordinatorCustomDuty duty1 = new CoordinatorCustomDuty() {
    @Override
    public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
      latch1.countDown();
      return params;
    }
  };
  CoordinatorCustomDutyGroup group1 =
      new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(duty1));
  CountDownLatch latch2 = new CountDownLatch(1);
  CoordinatorCustomDuty duty2 = new CoordinatorCustomDuty() {
    @Override
    public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
      latch2.countDown();
      return params;
    }
  };
  CoordinatorCustomDutyGroup group2 =
      new CoordinatorCustomDutyGroup("group2", Duration.standardSeconds(1), ImmutableList.of(duty2));
  CoordinatorCustomDutyGroups groups = new CoordinatorCustomDutyGroups(ImmutableSet.of(group1, group2));

  coordinator = new DruidCoordinator(
      druidCoordinatorConfig,
      new ZkPathsConfig() {
        @Override
        public String getBase() {
          return "druid";
        }
      },
      configManager,
      segmentsMetadataManager,
      serverInventoryView,
      metadataRuleManager,
      () -> curator,
      serviceEmitter,
      scheduledExecutorFactory,
      null,
      null,
      new NoopServiceAnnouncer() {
        @Override
        public void announce(DruidNode node) {
          // count down when this coordinator becomes the leader
          leaderAnnouncerLatch.countDown();
        }

        @Override
        public void unannounce(DruidNode node) {
          leaderUnannouncerLatch.countDown();
        }
      },
      druidNode,
      loadManagementPeons,
      null,
      new HashSet<>(),
      groups,
      new CostBalancerStrategyFactory(),
      EasyMock.createNiceMock(LookupCoordinatorManager.class),
      new TestDruidLeaderSelector(),
      null,
      ZkEnablementConfig.ENABLED
  );
  coordinator.start();

  // Wait until the duty in each group has run and counted down its latch.
  latch1.await();
  latch2.await();
}
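The two duties above are anonymous classes, but the only contract the test exercises is the single run method. Assuming CoordinatorCustomDuty is an interface whose only required method is run, as the anonymous classes suggest, an equivalent duty can be written once as a named class. A minimal sketch; the class name and latch field are illustrative, not part of Druid:

// Sketch only: class name and latch field are illustrative, not part of Druid.
class LatchCountingDuty implements CoordinatorCustomDuty {
  private final CountDownLatch latch;

  LatchCountingDuty(CountDownLatch latch) {
    this.latch = latch;
  }

  @Override
  public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params) {
    latch.countDown(); // signal that this duty has executed
    return params;     // duties hand the runtime params along the chain
  }
}

With such a class, group1 could be built as new CoordinatorCustomDutyGroup("group1", Duration.standardSeconds(1), ImmutableList.of(new LatchCountingDuty(latch1))).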
Use of org.apache.druid.client.DruidDataSource in project druid by druid-io.
From the class DataSourcesResource, method getSimpleDatasource.
private Map<String, Map<String, Object>> getSimpleDatasource(String dataSourceName) {
  Map<String, Object> tiers = new HashMap<>();
  Map<String, Object> segments = new HashMap<>();
  Map<String, Map<String, Object>> retVal = ImmutableMap.of("tiers", tiers, "segments", segments);
  Set<SegmentId> totalDistinctSegments = new HashSet<>();
  Map<String, HashSet<Object>> tierDistinctSegments = new HashMap<>();

  long totalSegmentSize = 0;
  long totalReplicatedSize = 0;
  DateTime minTime = DateTimes.MAX;
  DateTime maxTime = DateTimes.MIN;
  String tier;

  for (DruidServer druidServer : serverInventoryView.getInventory()) {
    DruidDataSource druidDataSource = druidServer.getDataSource(dataSourceName);
    tier = druidServer.getTier();
    if (druidDataSource == null) {
      continue;
    }
    tierDistinctSegments.computeIfAbsent(tier, t -> new HashSet<>());

    long dataSourceSegmentSize = 0;
    long replicatedSegmentSize = 0;
    for (DataSegment dataSegment : druidDataSource.getSegments()) {
      // tier segments stats: count each distinct segment once per tier
      if (!tierDistinctSegments.get(tier).contains(dataSegment.getId())) {
        dataSourceSegmentSize += dataSegment.getSize();
        tierDistinctSegments.get(tier).add(dataSegment.getId());
      }
      // total segments stats: count each distinct segment once globally
      if (totalDistinctSegments.add(dataSegment.getId())) {
        totalSegmentSize += dataSegment.getSize();
        minTime = DateTimes.min(minTime, dataSegment.getInterval().getStart());
        maxTime = DateTimes.max(maxTime, dataSegment.getInterval().getEnd());
      }
      // replicated sizes count every loaded copy, distinct or not
      totalReplicatedSize += dataSegment.getSize();
      replicatedSegmentSize += dataSegment.getSize();
    }

    // tier stats
    Map<String, Object> tierStats = (Map) tiers.get(tier);
    if (tierStats == null) {
      tierStats = new HashMap<>();
      tiers.put(druidServer.getTier(), tierStats);
    }
    tierStats.put("segmentCount", tierDistinctSegments.get(tier).size());

    long segmentSize = MapUtils.getLong(tierStats, "size", 0L);
    tierStats.put("size", segmentSize + dataSourceSegmentSize);

    long replicatedSize = MapUtils.getLong(tierStats, "replicatedSize", 0L);
    tierStats.put("replicatedSize", replicatedSize + replicatedSegmentSize);
  }

  segments.put("count", totalDistinctSegments.size());
  segments.put("size", totalSegmentSize);
  segments.put("replicatedSize", totalReplicatedSize);
  segments.put("minTime", minTime);
  segments.put("maxTime", maxTime);
  return retVal;
}
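The distinct-segment bookkeeping is what separates "size" from "replicatedSize": a segment's bytes count once per tier (and once globally) toward "size", but once per loaded copy toward "replicatedSize". A standalone sketch of the same accounting with hypothetical numbers, one 100-byte segment loaded on two servers in the same tier:

import java.util.HashSet;
import java.util.Set;

// Hypothetical numbers: one 100-byte segment, two replicas in one tier.
public class ReplicationAccountingSketch {
  public static void main(String[] args) {
    String segmentId = "dataSource1_2010-01-01_2010-01-02_v1"; // illustrative id
    long segmentBytes = 100;

    Set<String> distinctSegments = new HashSet<>();
    long size = 0;
    long replicatedSize = 0;

    for (int replica = 0; replica < 2; replica++) {
      if (distinctSegments.add(segmentId)) {
        size += segmentBytes;         // counted once per distinct segment
      }
      replicatedSize += segmentBytes; // counted once per loaded copy
    }

    System.out.println("size=" + size + ", replicatedSize=" + replicatedSize);
    // Prints: size=100, replicatedSize=200
  }
}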
Use of org.apache.druid.client.DruidDataSource in project druid by druid-io.
From the class TiersResource, method getTierDataSources.
@GET
@Path("/{tierName}")
@Produces(MediaType.APPLICATION_JSON)
public Response getTierDataSources(@PathParam("tierName") String tierName, @QueryParam("simple") String simple) {
  if (simple != null) {
    Map<String, Map<Interval, Map<IntervalProperties, Object>>> tierToStatsPerInterval = new HashMap<>();
    for (DruidServer druidServer : serverInventoryView.getInventory()) {
      if (druidServer.getTier().equalsIgnoreCase(tierName)) {
        for (DataSegment dataSegment : druidServer.iterateAllSegments()) {
          Map<IntervalProperties, Object> properties = tierToStatsPerInterval
              .computeIfAbsent(dataSegment.getDataSource(), dsName -> new HashMap<>())
              .computeIfAbsent(dataSegment.getInterval(), interval -> new EnumMap<>(IntervalProperties.class));
          properties.merge(IntervalProperties.size, dataSegment.getSize(), (a, b) -> (Long) a + (Long) b);
          properties.merge(IntervalProperties.count, 1, (a, b) -> (Integer) a + (Integer) b);
        }
      }
    }
    return Response.ok(tierToStatsPerInterval).build();
  }

  Set<String> retVal = serverInventoryView
      .getInventory()
      .stream()
      .filter(druidServer -> druidServer.getTier().equalsIgnoreCase(tierName))
      .flatMap(druidServer -> druidServer.getDataSources().stream().map(DruidDataSource::getName))
      .collect(Collectors.toSet());
  return Response.ok(retVal).build();
}
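The per-interval rollup leans on Map.merge over an EnumMap keyed by the property name, so each interval accumulates a running size and count without explicit null checks. An isolated sketch of the same pattern; the nested enum is a hypothetical stand-in for Druid's IntervalProperties:

import java.util.EnumMap;
import java.util.Map;

public class IntervalMergeSketch {
  // Hypothetical stand-in for the IntervalProperties enum used above.
  enum IntervalProperties { size, count }

  public static void main(String[] args) {
    Map<IntervalProperties, Object> properties = new EnumMap<>(IntervalProperties.class);
    long[] segmentSizes = {100L, 250L}; // illustrative segment sizes
    for (long segmentSize : segmentSizes) {
      // the first merge inserts the value; later merges add to it
      properties.merge(IntervalProperties.size, segmentSize, (a, b) -> (Long) a + (Long) b);
      properties.merge(IntervalProperties.count, 1, (a, b) -> (Integer) a + (Integer) b);
    }
    System.out.println(properties); // {size=350, count=2}
  }
}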
Use of org.apache.druid.client.DruidDataSource in project druid by druid-io.
From the class DruidCoordinator, method getLoadStatus.
public Map<String, Double> getLoadStatus() {
  final Map<String, Double> loadStatus = new HashMap<>();
  final Collection<ImmutableDruidDataSource> dataSources =
      segmentsMetadataManager.getImmutableDataSourcesWithAllUsedSegments();

  for (ImmutableDruidDataSource dataSource : dataSources) {
    final Set<DataSegment> segments = Sets.newHashSet(dataSource.getSegments());
    final int numPublishedSegments = segments.size();

    // remove loaded segments
    for (DruidServer druidServer : serverInventoryView.getInventory()) {
      final DruidDataSource loadedView = druidServer.getDataSource(dataSource.getName());
      if (loadedView != null) {
        // Please see https://github.com/apache/druid/pull/5632 and LoadStatusBenchmark for more info.
        for (DataSegment serverSegment : loadedView.getSegments()) {
          segments.remove(serverSegment);
        }
      }
    }

    final int numUnavailableSegments = segments.size();
    loadStatus.put(
        dataSource.getName(),
        100 * ((double) (numPublishedSegments - numUnavailableSegments) / (double) numPublishedSegments)
    );
  }
  return loadStatus;
}
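Each datasource's value is the percentage of its published segments found on at least one server. A worked example with hypothetical numbers, 10 published segments of which 2 remain unavailable after the sweep:

public class LoadStatusMath {
  public static void main(String[] args) {
    int numPublishedSegments = 10;  // hypothetical
    int numUnavailableSegments = 2; // hypothetical
    double percentLoaded =
        100 * ((double) (numPublishedSegments - numUnavailableSegments) / (double) numPublishedSegments);
    System.out.println(percentLoaded); // 80.0
  }
}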
Use of org.apache.druid.client.DruidDataSource in project druid by druid-io.
From the class DataSourcesResourceTest, method testMarkAsUsedNonOvershadowedSegmentsIntervalException.
@Test
public void testMarkAsUsedNonOvershadowedSegmentsIntervalException() {
  DruidDataSource dataSource = new DruidDataSource("datasource1", new HashMap<>());
  Interval interval = Intervals.of("2010-01-22/P1D");

  int numUpdatedSegments = segmentsMetadataManager.markAsUsedNonOvershadowedSegmentsInInterval(
      EasyMock.eq("datasource1"),
      EasyMock.eq(interval)
  );
  EasyMock.expect(numUpdatedSegments).andThrow(new RuntimeException("Error!")).once();
  EasyMock.expect(inventoryView.getInventory()).andReturn(ImmutableList.of(server)).once();
  EasyMock.expect(server.getDataSource("datasource1")).andReturn(dataSource).once();
  EasyMock.replay(segmentsMetadataManager, inventoryView, server);

  DataSourcesResource dataSourcesResource =
      new DataSourcesResource(inventoryView, segmentsMetadataManager, null, null, null, null);
  Response response = dataSourcesResource.markAsUsedNonOvershadowedSegments(
      "datasource1",
      new DataSourcesResource.MarkDataSourceSegmentsPayload(interval, null)
  );
  Assert.assertEquals(500, response.getStatus());
  EasyMock.verify(segmentsMetadataManager, inventoryView, server);
}
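Note the EasyMock idiom in the lines that record the metadata call: the method is invoked on the mock during the record phase, and its (boxed) return value is passed to expect(...) so the expectation can be rebound to andThrow. A minimal, self-contained sketch of the same record/replay/verify cycle, using a hypothetical Service interface in place of the real collaborators:

import org.easymock.EasyMock;

public class EasyMockSketch {
  // Hypothetical interface standing in for SegmentsMetadataManager.
  interface Service {
    int doWork(String input);
  }

  public static void main(String[] args) {
    Service service = EasyMock.createMock(Service.class);
    // Record: invoke the method on the mock, then bind the expectation to a throw.
    EasyMock.expect(service.doWork("datasource1")).andThrow(new RuntimeException("Error!")).once();
    EasyMock.replay(service);
    try {
      service.doWork("datasource1");
    } catch (RuntimeException expected) {
      // a resource method would translate this into a 500 response
    }
    EasyMock.verify(service); // fails if the recorded call never happened
  }
}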