Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class SpatialFilterTest, method makeMergedQueryableIndex:
private static QueryableIndex makeMergedQueryableIndex(IndexSpec indexSpec) {
try {
IncrementalIndex first = new OnheapIncrementalIndex.Builder()
    .setIndexSchema(new IncrementalIndexSchema.Builder()
        .withMinTimestamp(DATA_INTERVAL.getStartMillis())
        .withQueryGranularity(Granularities.DAY)
        .withMetrics(METRIC_AGGS)
        .withDimensionsSpec(DimensionsSpec.builder()
            .setSpatialDimensions(Arrays.asList(
                new SpatialDimensionSchema("dim.geo", Arrays.asList("lat", "long")),
                new SpatialDimensionSchema("spatialIsRad", Arrays.asList("lat2", "long2"))))
            .build())
        .build())
    .setMaxRowCount(1000)
    .build();
IncrementalIndex second = new OnheapIncrementalIndex.Builder()
    .setIndexSchema(new IncrementalIndexSchema.Builder()
        .withMinTimestamp(DATA_INTERVAL.getStartMillis())
        .withQueryGranularity(Granularities.DAY)
        .withMetrics(METRIC_AGGS)
        .withDimensionsSpec(DimensionsSpec.builder()
            .setSpatialDimensions(Arrays.asList(
                new SpatialDimensionSchema("dim.geo", Arrays.asList("lat", "long")),
                new SpatialDimensionSchema("spatialIsRad", Arrays.asList("lat2", "long2"))))
            .build())
        .build())
    .setMaxRowCount(1000)
    .build();
IncrementalIndex third = new OnheapIncrementalIndex.Builder()
    .setIndexSchema(new IncrementalIndexSchema.Builder()
        .withMinTimestamp(DATA_INTERVAL.getStartMillis())
        .withQueryGranularity(Granularities.DAY)
        .withMetrics(METRIC_AGGS)
        .withDimensionsSpec(DimensionsSpec.builder()
            .setSpatialDimensions(Arrays.asList(
                new SpatialDimensionSchema("dim.geo", Arrays.asList("lat", "long")),
                new SpatialDimensionSchema("spatialIsRad", Arrays.asList("lat2", "long2"))))
            .build())
        .build())
    .setMaxRowCount(NUM_POINTS)
    .build();
first.add(new MapBasedInputRow(DateTimes.of("2013-01-01").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-01").toString(), "dim", "foo", "lat", 0.0f, "long", 0.0f, "val", 17L)));
first.add(new MapBasedInputRow(DateTimes.of("2013-01-02").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-02").toString(), "dim", "foo", "lat", 1.0f, "long", 3.0f, "val", 29L)));
first.add(new MapBasedInputRow(DateTimes.of("2013-01-03").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-03").toString(), "dim", "foo", "lat", 4.0f, "long", 2.0f, "val", 13L)));
first.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-05").toString(), "dim", "foo", "lat", "_mmx.unknown", "long", "_mmx.unknown", "val", 101L)));
first.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-05").toString(), "dim", "foo", "dim.geo", "_mmx.unknown", "val", 501L)));
second.add(new MapBasedInputRow(DateTimes.of("2013-01-04").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-04").toString(), "dim", "foo", "lat", 7.0f, "long", 3.0f, "val", 91L)));
second.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-05").toString(), "dim", "foo", "lat", 8.0f, "long", 6.0f, "val", 47L)));
second.add(new MapBasedInputRow(DateTimes.of("2013-01-05").getMillis(), DIMS,
    ImmutableMap.of("timestamp", DateTimes.of("2013-01-05").toString(), "lat2", 0.0f, "long2", 0.0f, "val", 13L)));
// Add a bunch of random points
Random rand = ThreadLocalRandom.current();
for (int i = 8; i < NUM_POINTS; i++) {
third.add(new MapBasedInputRow(DateTimes.of("2013-01-01").getMillis(), DIMS, ImmutableMap.of(
    "timestamp", DateTimes.of("2013-01-01").toString(),
    "dim", "boo",
    "lat", (float) (rand.nextFloat() * 10 + 10.0),
    "long", (float) (rand.nextFloat() * 10 + 10.0),
    "val", i)));
}
File tmpFile = File.createTempFile("yay", "who");
tmpFile.delete();
File firstFile = new File(tmpFile, "first");
File secondFile = new File(tmpFile, "second");
File thirdFile = new File(tmpFile, "third");
File mergedFile = new File(tmpFile, "merged");
FileUtils.mkdirp(firstFile);
FileUtils.mkdirp(secondFile);
FileUtils.mkdirp(thirdFile);
FileUtils.mkdirp(mergedFile);
firstFile.deleteOnExit();
secondFile.deleteOnExit();
thirdFile.deleteOnExit();
mergedFile.deleteOnExit();
INDEX_MERGER.persist(first, DATA_INTERVAL, firstFile, indexSpec, null);
INDEX_MERGER.persist(second, DATA_INTERVAL, secondFile, indexSpec, null);
INDEX_MERGER.persist(third, DATA_INTERVAL, thirdFile, indexSpec, null);
QueryableIndex mergedRealtime = INDEX_IO.loadIndex(
    INDEX_MERGER.mergeQueryableIndex(
        Arrays.asList(INDEX_IO.loadIndex(firstFile), INDEX_IO.loadIndex(secondFile), INDEX_IO.loadIndex(thirdFile)),
        true,
        METRIC_AGGS,
        mergedFile,
        indexSpec,
        null,
        -1
    )
);
return mergedRealtime;
} catch (IOException e) {
throw new RuntimeException(e);
}
}
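A minimal sketch, not part of the original test class, of how the merged index can be wrapped in a Segment before running spatial-filter queries against it. The bare "new IndexSpec()" argument and the "spatial-merged" segment id are illustrative assumptions rather than values taken from the test:

private static void queryMergedIndex() throws IOException {
  QueryableIndex merged = makeMergedQueryableIndex(new IndexSpec());
  try (QueryableIndexSegment segment = new QueryableIndexSegment(merged, SegmentId.dummy("spatial-merged"))) {
    // a spatial-filtered query would be run against this segment here;
    // closing the segment releases the underlying memory-mapped index
  }
}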
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class HashJoinSegmentTest, method setUp:
@Before
public void setUp() throws IOException {
allReferencesAcquireCount = 0;
allReferencesCloseCount = 0;
referencedSegmentAcquireCount = 0;
referencedSegmentClosedCount = 0;
indexedTableJoinableReferenceAcquireCount = 0;
indexedTableJoinableReferenceCloseCount = 0;
j0Closed = false;
j1Closed = false;
baseSegment = new QueryableIndexSegment(
    JoinTestHelper.createFactIndexBuilder(temporaryFolder.newFolder()).buildMMappedIndex(),
    SegmentId.dummy("facts")
);
List<JoinableClause> joinableClauses = ImmutableList.of(
    new JoinableClause(
        "j0.",
        new IndexedTableJoinable(JoinTestHelper.createCountriesIndexedTable()) {
          @Override
          public Optional<Closeable> acquireReferences() {
            if (!j0Closed) {
              indexedTableJoinableReferenceAcquireCount++;
              Closer closer = Closer.create();
              closer.register(() -> indexedTableJoinableReferenceCloseCount++);
              return Optional.of(closer);
            }
            return Optional.empty();
          }
        },
        JoinType.LEFT,
        JoinConditionAnalysis.forExpression("1", "j0.", ExprMacroTable.nil())
    ),
    new JoinableClause(
        "j1.",
        new IndexedTableJoinable(JoinTestHelper.createRegionsIndexedTable()) {
          @Override
          public Optional<Closeable> acquireReferences() {
            if (!j1Closed) {
              indexedTableJoinableReferenceAcquireCount++;
              Closer closer = Closer.create();
              closer.register(() -> indexedTableJoinableReferenceCloseCount++);
              return Optional.of(closer);
            }
            return Optional.empty();
          }
        },
        JoinType.LEFT,
        JoinConditionAnalysis.forExpression("1", "j1.", ExprMacroTable.nil())
    )
);
referencedSegment = ReferenceCountingSegment.wrapRootGenerationSegment(baseSegment);
SegmentReference testWrapper = new SegmentReference() {
@Override
public Optional<Closeable> acquireReferences() {
Closer closer = Closer.create();
return referencedSegment.acquireReferences().map(closeable -> {
referencedSegmentAcquireCount++;
closer.register(closeable);
closer.register(() -> referencedSegmentClosedCount++);
return closer;
});
}
@Override
public SegmentId getId() {
return referencedSegment.getId();
}
@Override
public Interval getDataInterval() {
return referencedSegment.getDataInterval();
}
@Nullable
@Override
public QueryableIndex asQueryableIndex() {
return referencedSegment.asQueryableIndex();
}
@Override
public StorageAdapter asStorageAdapter() {
return referencedSegment.asStorageAdapter();
}
@Override
public void close() {
referencedSegment.close();
}
};
hashJoinSegment = new HashJoinSegment(testWrapper, null, joinableClauses, null) {
@Override
public Optional<Closeable> acquireReferences() {
Closer closer = Closer.create();
return super.acquireReferences().map(closeable -> {
allReferencesAcquireCount++;
closer.register(closeable);
closer.register(() -> allReferencesCloseCount++);
return closer;
});
}
};
}
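Illustrative only, not from the original test: the usage pattern that the acquire/close counters above are meant to verify. A caller acquires a reference, works with the segment while the reference is held, and then closes the returned Closeable, which is what bumps the matching close counters:

Optional<Closeable> maybeRef = hashJoinSegment.acquireReferences();
if (maybeRef.isPresent()) {
  try (Closeable ref = maybeRef.get()) {
    StorageAdapter adapter = hashJoinSegment.asStorageAdapter();
    // cursor-based reads would happen here while the reference is held
  } catch (IOException e) {
    throw new RuntimeException(e);
  }
}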
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class RealtimePlumberSchoolTest, method testDimOrderInheritanceHelper:
private void testDimOrderInheritanceHelper(final Object commitMetadata) throws Exception {
List<List<String>> expectedDims = ImmutableList.of(
    ImmutableList.of("dimD"),
    ImmutableList.of("dimC"),
    ImmutableList.of("dimA"),
    ImmutableList.of("dimB"),
    ImmutableList.of("dimE"),
    ImmutableList.of("dimD", "dimC", "dimA", "dimB", "dimE")
);
QueryableIndex qindex;
FireHydrant hydrant;
Map<Long, Sink> sinks;
RealtimePlumber plumber = (RealtimePlumber) realtimePlumberSchool.findPlumber(schema2, tuningConfig, metrics);
Assert.assertNull(plumber.startJob());
final CountDownLatch doneSignal = new CountDownLatch(1);
final Committer committer = new Committer() {
@Override
public Object getMetadata() {
return commitMetadata;
}
@Override
public void run() {
doneSignal.countDown();
}
};
plumber.add(getTestInputRowFull("1970-01-01", ImmutableList.of("dimD"), ImmutableList.of("1")), Suppliers.ofInstance(committer));
plumber.add(getTestInputRowFull("1970-01-01", ImmutableList.of("dimC"), ImmutableList.of("1")), Suppliers.ofInstance(committer));
plumber.add(getTestInputRowFull("1970-01-01", ImmutableList.of("dimA"), ImmutableList.of("1")), Suppliers.ofInstance(committer));
plumber.add(getTestInputRowFull("1970-01-01", ImmutableList.of("dimB"), ImmutableList.of("1")), Suppliers.ofInstance(committer));
plumber.add(getTestInputRowFull("1970-01-01", ImmutableList.of("dimE"), ImmutableList.of("1")), Suppliers.ofInstance(committer));
plumber.add(getTestInputRowFull("1970-01-01", ImmutableList.of("dimA", "dimB", "dimC", "dimD", "dimE"), ImmutableList.of("1")), Suppliers.ofInstance(committer));
plumber.persist(committer);
doneSignal.await();
plumber.getSinks().clear();
plumber.finishJob();
RealtimePlumber restoredPlumber = (RealtimePlumber) realtimePlumberSchool.findPlumber(schema2, tuningConfig, metrics);
restoredPlumber.bootstrapSinksFromDisk();
sinks = restoredPlumber.getSinks();
Assert.assertEquals(1, sinks.size());
List<FireHydrant> hydrants = Lists.newArrayList(sinks.get(0L));
for (int i = 0; i < hydrants.size(); i++) {
hydrant = hydrants.get(i);
ReferenceCountingSegment segment = hydrant.getIncrementedSegment();
try {
qindex = segment.asQueryableIndex();
Assert.assertEquals(i, hydrant.getCount());
Assert.assertEquals(expectedDims.get(i), ImmutableList.copyOf(qindex.getAvailableDimensions()));
} finally {
segment.decrement();
}
}
}
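For reference, a minimal sketch, assuming the same test setup, of the increment/inspect/decrement pattern used in the loop above; the printed output and variable names are illustrative and not part of the original test:

ReferenceCountingSegment seg = hydrant.getIncrementedSegment();
try {
  QueryableIndex qi = seg.asQueryableIndex();
  System.out.println("data interval: " + qi.getDataInterval());
  for (String dim : qi.getAvailableDimensions()) {
    System.out.println("dimension: " + dim); // order reflects the order dimensions were first ingested
  }
} finally {
  seg.decrement(); // release the reference taken by getIncrementedSegment()
}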
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class SystemSchemaTest, method setUp:
@Before
public void setUp() throws Exception {
serverView = EasyMock.createNiceMock(TimelineServerView.class);
client = EasyMock.createMock(DruidLeaderClient.class);
coordinatorClient = EasyMock.createMock(DruidLeaderClient.class);
overlordClient = EasyMock.createMock(DruidLeaderClient.class);
mapper = TestHelper.makeJsonMapper();
responseHolder = EasyMock.createMock(StringFullResponseHolder.class);
responseHandler = EasyMock.createMockBuilder(BytesAccumulatingResponseHandler.class)
    .withConstructor()
    .addMockedMethod("handleResponse", HttpResponse.class, HttpResponseHandler.TrafficCop.class)
    .addMockedMethod("getStatus")
    .createMock();
request = EasyMock.createMock(Request.class);
authMapper = createAuthMapper();
final File tmpDir = temporaryFolder.newFolder();
final QueryableIndex index1 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "1"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder()
        .withMetrics(
            new CountAggregatorFactory("cnt"),
            new DoubleSumAggregatorFactory("m1", "m1"),
            new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
        )
        .withRollup(false)
        .build())
    .rows(ROWS1)
    .buildMMappedIndex();
final QueryableIndex index2 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "2"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
    .rows(ROWS2)
    .buildMMappedIndex();
final QueryableIndex index3 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "3"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
    .rows(ROWS3)
    .buildMMappedIndex();
walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
    .add(segment1, index1)
    .add(segment2, index2)
    .add(segment3, index3);
druidSchema = new DruidSchema(
    CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
    new TestServerInventoryView(walker.getSegments(), realtimeSegments),
    new SegmentManager(EasyMock.createMock(SegmentLoader.class)),
    new MapJoinableFactory(ImmutableSet.of(), ImmutableMap.of()),
    PLANNER_CONFIG_DEFAULT,
    new NoopEscalator(),
    new BrokerInternalQueryConfig(),
    null
);
druidSchema.start();
druidSchema.awaitInitialization();
metadataView = EasyMock.createMock(MetadataSegmentView.class);
druidNodeDiscoveryProvider = EasyMock.createMock(DruidNodeDiscoveryProvider.class);
serverInventoryView = EasyMock.createMock(FilteredServerInventoryView.class);
schema = new SystemSchema(
    druidSchema,
    metadataView,
    serverView,
    serverInventoryView,
    EasyMock.createStrictMock(AuthorizerMapper.class),
    client,
    client,
    druidNodeDiscoveryProvider,
    mapper
);
}
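The segment1, segment2, and segment3 descriptors registered with the walker above are defined elsewhere in the test class. As a hedged sketch of what such a descriptor can look like, following the DataSegment.builder() pattern used in DruidSchemaTest below; all values here are illustrative, not the ones this test actually uses:

DataSegment exampleSegment = DataSegment.builder()
    .dataSource("test1")                 // illustrative datasource name
    .interval(Intervals.of("2000/P1Y"))  // illustrative interval
    .version("1")
    .shardSpec(new LinearShardSpec(0))
    .size(0)
    .build();
// e.g. walker = new SpecificSegmentsQuerySegmentWalker(conglomerate).add(exampleSegment, index1);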
Use of org.apache.druid.segment.QueryableIndex in project druid by druid-io.
From the class DruidSchemaTest, method setUp:
@Before
public void setUp() throws Exception {
final File tmpDir = temporaryFolder.newFolder();
final QueryableIndex index1 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "1"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder()
        .withMetrics(
            new CountAggregatorFactory("cnt"),
            new DoubleSumAggregatorFactory("m1", "m1"),
            new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
        )
        .withRollup(false)
        .build())
    .rows(ROWS1)
    .buildMMappedIndex();
final QueryableIndex index2 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "2"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
    .rows(ROWS2)
    .buildMMappedIndex();
walker = new SpecificSegmentsQuerySegmentWalker(conglomerate)
    .add(
        DataSegment.builder()
            .dataSource(CalciteTests.DATASOURCE1)
            .interval(Intervals.of("2000/P1Y"))
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build(),
        index1
    )
    .add(
        DataSegment.builder()
            .dataSource(CalciteTests.DATASOURCE1)
            .interval(Intervals.of("2001/P1Y"))
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build(),
        index2
    )
    .add(
        DataSegment.builder()
            .dataSource(CalciteTests.DATASOURCE2)
            .interval(index2.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build(),
        index2
    );
final DataSegment segment1 = new DataSegment(
    "foo3",
    Intervals.of("2012/2013"),
    "version3",
    null,
    ImmutableList.of("dim1", "dim2"),
    ImmutableList.of("met1", "met2"),
    new NumberedShardSpec(2, 3),
    null,
    1,
    100L,
    PruneSpecsHolder.DEFAULT
);
final List<DataSegment> realtimeSegments = ImmutableList.of(segment1);
serverView = new TestServerInventoryView(walker.getSegments(), realtimeSegments);
druidServers = serverView.getDruidServers();
schema = new DruidSchema(
    CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
    serverView,
    segmentManager,
    new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)),
    PLANNER_CONFIG_DEFAULT,
    new NoopEscalator(),
    new BrokerInternalQueryConfig(),
    null
) {
@Override
protected DruidTable buildDruidTable(String dataSource) {
DruidTable table = super.buildDruidTable(dataSource);
buildTableLatch.countDown();
return table;
}
@Override
void markDataSourceAsNeedRebuild(String datasource) {
super.markDataSourceAsNeedRebuild(datasource);
markDataSourceLatch.countDown();
}
};
schema2 = new DruidSchema(
    CalciteTests.createMockQueryLifecycleFactory(walker, conglomerate),
    serverView,
    segmentManager,
    new MapJoinableFactory(ImmutableSet.of(globalTableJoinable), ImmutableMap.of(globalTableJoinable.getClass(), GlobalTableDataSource.class)),
    PLANNER_CONFIG_DEFAULT,
    new NoopEscalator(),
    new BrokerInternalQueryConfig(),
    null
) {
boolean throwException = true;
@Override
protected DruidTable buildDruidTable(String dataSource) {
DruidTable table = super.buildDruidTable(dataSource);
buildTableLatch.countDown();
return table;
}
@Override
protected Set<SegmentId> refreshSegments(final Set<SegmentId> segments) throws IOException {
if (throwException) {
throwException = false;
throw new RuntimeException("Query[xxxx] url[http://xxxx:8083/druid/v2/] timed out.");
} else {
return super.refreshSegments(segments);
}
}
@Override
void markDataSourceAsNeedRebuild(String datasource) {
super.markDataSourceAsNeedRebuild(datasource);
markDataSourceLatch.countDown();
}
};
schema.start();
schema.awaitInitialization();
}
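A hedged sketch of how one more QueryableIndex could be registered with the walker inside setUp(), following the same IndexBuilder and DataSegment.builder() pattern as above; the "3" directory name, the extra datasource name, and the reuse of ROWS2 are illustrative assumptions, not part of the original test:

final QueryableIndex index3 = IndexBuilder.create()
    .tmpDir(new File(tmpDir, "3"))
    .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
    .schema(new IncrementalIndexSchema.Builder().withMetrics(new LongSumAggregatorFactory("m1", "m1")).withRollup(false).build())
    .rows(ROWS2) // reusing ROWS2 purely for illustration
    .buildMMappedIndex();
walker.add(
    DataSegment.builder()
        .dataSource("foo-extra") // illustrative datasource name
        .interval(index3.getDataInterval())
        .version("1")
        .shardSpec(new LinearShardSpec(0))
        .size(0)
        .build(),
    index3
);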