Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
In class UnifiedIndexerAppenderatorsManagerTest, the method test_limitedPool_persist:
@Test
public void test_limitedPool_persist() throws IOException {
  final UnifiedIndexerAppenderatorsManager.LimitedPoolIndexMerger limitedPoolIndexMerger =
      new UnifiedIndexerAppenderatorsManager.LimitedPoolIndexMerger(
          new NoopIndexMerger(),
          DirectQueryProcessingPool.INSTANCE
      );
  final File file = new File("xyz");

  // Three forms of persist.
  Assert.assertEquals(file, limitedPoolIndexMerger.persist(null, null, file, null, null, null));
  Assert.assertEquals(file, limitedPoolIndexMerger.persist(null, null, file, null, null));

  // Need a mocked index for this test, since getInterval is called on it.
  final IncrementalIndex index = EasyMock.createMock(IncrementalIndex.class);
  EasyMock.expect(index.getInterval()).andReturn(null);
  EasyMock.replay(index);
  Assert.assertEquals(file, limitedPoolIndexMerger.persist(index, file, null, null));
  EasyMock.verify(index);
}
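The three persist calls line up with the three overloads that LimitedPoolIndexMerger delegates to on the wrapped IndexMerger. A hedged paraphrase of those declarations (parameter names are illustrative; the authoritative signatures live in org.apache.druid.segment.IndexMerger):

// Sketch of the IndexMerger persist overloads exercised above, paraphrased.
// Imports assumed: java.io.*, org.joda.time.Interval, and the Druid segment classes.
interface PersistOverloads {
  // Derives the data interval from index.getInterval(); this is why the test
  // above needs a mocked IncrementalIndex for the four-argument call.
  File persist(IncrementalIndex index, File outDir, IndexSpec indexSpec,
               SegmentWriteOutMediumFactory segmentWriteOutMediumFactory) throws IOException;

  // The caller supplies the data interval explicitly.
  File persist(IncrementalIndex index, Interval dataInterval, File outDir, IndexSpec indexSpec,
               SegmentWriteOutMediumFactory segmentWriteOutMediumFactory) throws IOException;

  // The caller also supplies a ProgressIndicator for persist progress reporting.
  File persist(IncrementalIndex index, Interval dataInterval, File outDir, IndexSpec indexSpec,
               ProgressIndicator progress,
               SegmentWriteOutMediumFactory segmentWriteOutMediumFactory) throws IOException;
}

The assertions pass because the test wraps a NoopIndexMerger, whose persist simply hands back the output directory.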
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
In class QueryRunnerTestHelper, the method makeUnionQueryRunners:
@SuppressWarnings("unchecked")
public static Collection<?> makeUnionQueryRunners(QueryRunnerFactory factory) {
  // Build union query runners over one realtime (incremental) index and two
  // persisted (queryable) indexes, so union queries are exercised against all three.
  final IncrementalIndex rtIndex = TestIndex.getIncrementalTestIndex();
  final QueryableIndex mMappedTestIndex = TestIndex.getMMappedTestIndex();
  final QueryableIndex mergedRealtimeIndex = TestIndex.mergedRealtimeIndex();
  return Arrays.asList(
      makeUnionQueryRunner(factory, new IncrementalIndexSegment(rtIndex, SEGMENT_ID), "rtIndex"),
      makeUnionQueryRunner(factory, new QueryableIndexSegment(mMappedTestIndex, SEGMENT_ID), "mMappedTestIndex"),
      makeUnionQueryRunner(factory, new QueryableIndexSegment(mergedRealtimeIndex, SEGMENT_ID), "mergedRealtimeIndex")
  );
}
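In Druid's test suite these helpers typically feed JUnit parameterized tests. A minimal sketch of the usual consumption pattern, assuming the transformToConstructionFeeder helper on QueryRunnerTestHelper (the test class and FACTORY placeholder are hypothetical):

// Hypothetical parameterized test skeleton; each runner from the helper becomes
// one test-case constructor argument.
@RunWith(Parameterized.class)
public class ExampleUnionQueryTest {
  // Placeholder: a real test constructs a concrete QueryRunnerFactory here.
  private static final QueryRunnerFactory FACTORY = null;

  private final QueryRunner runner;

  public ExampleUnionQueryTest(QueryRunner runner) {
    this.runner = runner;
  }

  // transformToConstructionFeeder wraps each runner in an Object[] for JUnit.
  @Parameterized.Parameters(name = "{0}")
  public static Collection<Object[]> constructorFeeder() {
    return QueryRunnerTestHelper.transformToConstructionFeeder(
        QueryRunnerTestHelper.makeUnionQueryRunners(FACTORY)
    );
  }
}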
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
In class QueryRunnerTestHelper, the method makeQueryRunners:
public static <T, QueryType extends Query<T>> List<QueryRunner<T>> makeQueryRunners(QueryRunnerFactory<T, QueryType> factory) {
  // Same idea as makeUnionQueryRunners, with additional no-rollup variants of each index flavor.
  final IncrementalIndex rtIndex = TestIndex.getIncrementalTestIndex();
  final IncrementalIndex noRollupRtIndex = TestIndex.getNoRollupIncrementalTestIndex();
  final QueryableIndex mMappedTestIndex = TestIndex.getMMappedTestIndex();
  final QueryableIndex noRollupMMappedTestIndex = TestIndex.getNoRollupMMappedTestIndex();
  final QueryableIndex mergedRealtimeIndex = TestIndex.mergedRealtimeIndex();
  return ImmutableList.of(
      makeQueryRunner(factory, new IncrementalIndexSegment(rtIndex, SEGMENT_ID), "rtIndex"),
      makeQueryRunner(factory, new IncrementalIndexSegment(noRollupRtIndex, SEGMENT_ID), "noRollupRtIndex"),
      makeQueryRunner(factory, new QueryableIndexSegment(mMappedTestIndex, SEGMENT_ID), "mMappedTestIndex"),
      makeQueryRunner(factory, new QueryableIndexSegment(noRollupMMappedTestIndex, SEGMENT_ID), "noRollupMMappedTestIndex"),
      makeQueryRunner(factory, new QueryableIndexSegment(mergedRealtimeIndex, SEGMENT_ID), "mergedRealtimeIndex")
  );
}
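Both helpers hinge on the same wrapping step: Druid has one Segment adapter per index flavor. A minimal sketch of that step in isolation (SEGMENT_ID is the helper's shared SegmentId constant; both adapter classes are real Druid classes):

// An in-memory, still-appendable index is wrapped as an IncrementalIndexSegment,
Segment realtimeSegment = new IncrementalIndexSegment(TestIndex.getIncrementalTestIndex(), SEGMENT_ID);
// while a persisted, memory-mapped index is wrapped as a QueryableIndexSegment.
Segment mmappedSegment = new QueryableIndexSegment(TestIndex.getMMappedTestIndex(), SEGMENT_ID);

Downstream query code then treats both uniformly through the Segment interface, which is what lets one parameterized test cover every storage flavor.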
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
In class BroadcastJoinableMMappedQueryableSegmentizerFactoryTest, the method testSegmentizer:
@Test
public void testSegmentizer() throws IOException, SegmentLoadingException {
  final ObjectMapper mapper = new DefaultObjectMapper();
  mapper.registerModule(new SegmentizerModule());
  final IndexIO indexIO = new IndexIO(mapper, () -> 0);
  mapper.setInjectableValues(
      new InjectableValues.Std()
          .addValue(ExprMacroTable.class.getName(), TestExprMacroTable.INSTANCE)
          .addValue(ObjectMapper.class.getName(), mapper)
          .addValue(IndexIO.class, indexIO)
          .addValue(DataSegment.PruneSpecsHolder.class, DataSegment.PruneSpecsHolder.DEFAULT)
  );
  IndexMerger indexMerger = new IndexMergerV9(mapper, indexIO, OffHeapMemorySegmentWriteOutMediumFactory.instance());
  SegmentizerFactory expectedFactory = new BroadcastJoinableMMappedQueryableSegmentizerFactory(indexIO, KEY_COLUMNS);
  Interval testInterval = Intervals.of("2011-01-12T00:00:00.000Z/2011-05-01T00:00:00.000Z");
  IncrementalIndex data = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv");
  List<String> columnNames = data.getColumnNames();

  // Persist the incremental index; the custom SegmentizerFactory rides along in the IndexSpec.
  File segment = new File(temporaryFolder.newFolder(), "segment");
  File persistedSegmentRoot = indexMerger.persist(data, testInterval, segment, new IndexSpec(null, null, null, null, expectedFactory), null);

  // The persisted segment directory should contain a factory.json describing how to load it.
  File factoryJson = new File(persistedSegmentRoot, "factory.json");
  Assert.assertTrue(factoryJson.exists());
  SegmentizerFactory factory = mapper.readValue(factoryJson, SegmentizerFactory.class);
  Assert.assertTrue(factory instanceof BroadcastJoinableMMappedQueryableSegmentizerFactory);
  Assert.assertEquals(expectedFactory, factory);

  // Load a segment through the deserialized factory and read it back as an IndexedTable.
  final DataSegment dataSegment = new DataSegment(
      TABLE_NAME,
      testInterval,
      DateTimes.nowUtc().toString(),
      ImmutableMap.of(),
      columnNames,
      ImmutableList.of(),
      null,
      null,
      persistedSegmentRoot.getTotalSpace()
  );
  final Segment loaded = factory.factorize(dataSegment, persistedSegmentRoot, false, SegmentLazyLoadFailCallback.NOOP);
  final BroadcastSegmentIndexedTable table = (BroadcastSegmentIndexedTable) loaded.as(IndexedTable.class);
  Assert.assertNotNull(table);
}
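A loaded table could be inspected further through the IndexedTable interface. A hedged follow-up sketch, assuming keyColumns() returns a Set and numRows() reports the persisted row count (these assertions are hypothetical additions, not part of the original test):

// Hypothetical extra checks against the table loaded above.
Assert.assertEquals(ImmutableSet.copyOf(KEY_COLUMNS), table.keyColumns());
Assert.assertEquals(data.size(), table.numRows());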
Use of org.apache.druid.segment.incremental.IncrementalIndex in project druid by druid-io.
In class SegmentManagerBroadcastJoinIndexedTableTest, the method testLoadMultipleIndexedTable:
@Test
public void testLoadMultipleIndexedTable() throws IOException, SegmentLoadingException {
  final DataSource dataSource = new GlobalTableDataSource(TABLE_NAME);
  Assert.assertFalse(joinableFactory.isDirectlyJoinable(dataSource));

  final String version = DateTimes.nowUtc().toString();
  final String version2 = DateTimes.nowUtc().plus(1000L).toString();
  final String interval = "2011-01-12T00:00:00.000Z/2011-05-01T00:00:00.000Z";
  final String interval2 = "2011-01-12T00:00:00.000Z/2011-03-28T00:00:00.000Z";
  IncrementalIndex data = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv.bottom");
  IncrementalIndex data2 = TestIndex.makeRealtimeIndex("druid.sample.numeric.tsv.top");

  Assert.assertTrue(segmentManager.loadSegment(createSegment(data, interval, version), false, SegmentLazyLoadFailCallback.NOOP));
  Assert.assertTrue(joinableFactory.isDirectlyJoinable(dataSource));

  Optional<Joinable> maybeJoinable = makeJoinable(dataSource);
  Assert.assertTrue(maybeJoinable.isPresent());
  Joinable joinable = maybeJoinable.get();

  // Cardinality is currently tied to the number of rows in the segment.
  Assert.assertEquals(733, joinable.getCardinality("market"));
  Assert.assertEquals(733, joinable.getCardinality("placement"));
  Assert.assertEquals(
      Optional.of(ImmutableSet.of("preferred")),
      joinable.getCorrelatedColumnValues("market", "spot", "placement", Long.MAX_VALUE, false)
  );

  // Add another segment with a smaller interval. It only partially overshadows the first,
  // so there will be two segments in the timeline.
  Assert.assertTrue(segmentManager.loadSegment(createSegment(data2, interval2, version2), false, SegmentLazyLoadFailCallback.NOOP));

  expectedException.expect(ISE.class);
  expectedException.expectMessage(StringUtils.format("Currently only single segment datasources are supported for broadcast joins, dataSource[%s] has multiple segments. Reingest the data so that it is entirely contained within a single segment to use in JOIN queries.", TABLE_NAME));

  // This will explode because the datasource now has multiple segments, an invalid state for the joinable factory.
  makeJoinable(dataSource);
}
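createSegment and makeJoinable are private helpers of the test class, not shown here. A plausible sketch of makeJoinable's shape, assuming it resolves the datasource through JoinableFactory.build with an equi-join condition (the join expression and the "t." prefix are illustrative, not the test's actual values):

// Hypothetical reconstruction: ask the broadcast joinable factory for a Joinable
// under some equi-join condition against the global table.
private Optional<Joinable> makeJoinable(DataSource dataSource) {
  return joinableFactory.build(
      dataSource,
      JoinConditionAnalysis.forExpression("market == \"t.market\"", "t.", ExprMacroTable.nil())
  );
}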