Use of io.druid.segment.incremental.OnheapIncrementalIndex in project druid by druid-io.
Class IngestSegmentFirehoseFactoryTest, method constructorFeeder — builds the parameterized test cases: it fills an OnheapIncrementalIndex, persists it, wires up a stubbed task toolbox, and emits one IngestSegmentFirehoseFactory per combination of dimension list, metric list, and parser:
@Parameterized.Parameters(name = "{1}")
public static Collection<Object[]> constructorFeeder() throws IOException
{
  final IndexSpec indexSpec = new IndexSpec();
  final HeapMemoryTaskStorage ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {});
  final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
      .withQueryGranularity(Granularities.NONE)
      .withMinTimestamp(JodaUtils.MIN_INSTANT)
      .withDimensionsSpec(ROW_PARSER)
      .withMetrics(
          new AggregatorFactory[]{
              new LongSumAggregatorFactory(METRIC_LONG_NAME, DIM_LONG_NAME),
              new DoubleSumAggregatorFactory(METRIC_FLOAT_NAME, DIM_FLOAT_NAME)
          }
      )
      .build();
  final OnheapIncrementalIndex index = new OnheapIncrementalIndex(schema, true, MAX_ROWS * MAX_SHARD_NUMBER);

  for (Integer i = 0; i < MAX_ROWS; ++i) {
    index.add(ROW_PARSER.parse(buildRow(i.longValue())));
  }

  if (!persistDir.mkdirs() && !persistDir.exists()) {
    throw new IOException(String.format("Could not create directory at [%s]", persistDir.getAbsolutePath()));
  }
  INDEX_MERGER.persist(index, persistDir, indexSpec);

  final TaskLockbox tl = new TaskLockbox(ts);
  final IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(null, null, null)
  {
    private final Set<DataSegment> published = Sets.newHashSet();
    private final Set<DataSegment> nuked = Sets.newHashSet();

    @Override
    public List<DataSegment> getUsedSegmentsForInterval(String dataSource, Interval interval) throws IOException
    {
      return ImmutableList.copyOf(segmentSet);
    }

    @Override
    public List<DataSegment> getUsedSegmentsForIntervals(String dataSource, List<Interval> interval) throws IOException
    {
      return ImmutableList.copyOf(segmentSet);
    }

    @Override
    public List<DataSegment> getUnusedSegmentsForInterval(String dataSource, Interval interval)
    {
      return ImmutableList.of();
    }

    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments)
    {
      Set<DataSegment> added = Sets.newHashSet();
      for (final DataSegment segment : segments) {
        if (published.add(segment)) {
          added.add(segment);
        }
      }
      return ImmutableSet.copyOf(added);
    }

    @Override
    public void deleteSegments(Set<DataSegment> segments)
    {
      nuked.addAll(segments);
    }
  };
  final LocalTaskActionClientFactory tac = new LocalTaskActionClientFactory(
      ts,
      new TaskActionToolbox(tl, mdc, newMockEmitter(), EasyMock.createMock(SupervisorManager.class))
  );
  SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
  EasyMock.replay(notifierFactory);
  final TaskToolboxFactory taskToolboxFactory = new TaskToolboxFactory(
      new TaskConfig(tmpDir.getAbsolutePath(), null, null, 50000, null, false, null, null),
      tac,
      newMockEmitter(),
      new DataSegmentPusher()
      {
        @Deprecated
        @Override
        public String getPathForHadoop(String dataSource)
        {
          return getPathForHadoop();
        }

        @Override
        public String getPathForHadoop()
        {
          throw new UnsupportedOperationException();
        }

        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException
        {
          return segment;
        }
      },
      new DataSegmentKiller()
      {
        @Override
        public void kill(DataSegment segments) throws SegmentLoadingException
        {
        }

        @Override
        public void killAll() throws IOException
        {
          throw new UnsupportedOperationException("not implemented");
        }
      },
      new DataSegmentMover()
      {
        @Override
        public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) throws SegmentLoadingException
        {
          return dataSegment;
        }
      },
      new DataSegmentArchiver()
      {
        @Override
        public DataSegment archive(DataSegment segment) throws SegmentLoadingException
        {
          return segment;
        }

        @Override
        public DataSegment restore(DataSegment segment) throws SegmentLoadingException
        {
          return segment;
        }
      },
      // segment announcer
      null,
      notifierFactory,
      // query runner factory conglomerate corporation unionized collective
      null,
      // query executor service
      null,
      // monitor scheduler
      null,
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(
              null,
              new SegmentLoaderConfig()
              {
                @Override
                public List<StorageLocationConfig> getLocations()
                {
                  return Lists.newArrayList();
                }
              },
              MAPPER
          )
      ),
      MAPPER,
      INDEX_MERGER,
      INDEX_IO,
      null,
      null,
      INDEX_MERGER_V9
  );
  Collection<Object[]> values = new LinkedList<>();
  for (InputRowParser parser : Arrays.<InputRowParser>asList(
      ROW_PARSER,
      new MapInputRowParser(
          new JSONParseSpec(
              new TimestampSpec(TIME_COLUMN, "auto", null),
              new DimensionsSpec(
                  DimensionsSpec.getDefaultSchemas(ImmutableList.<String>of()),
                  ImmutableList.of(DIM_FLOAT_NAME, DIM_LONG_NAME),
                  ImmutableList.<SpatialDimensionSchema>of()
              ),
              null,
              null
          )
      )
  )) {
    for (List<String> dim_names : Arrays.<List<String>>asList(null, ImmutableList.of(DIM_NAME))) {
      for (List<String> metric_names : Arrays.<List<String>>asList(
          null,
          ImmutableList.of(METRIC_LONG_NAME, METRIC_FLOAT_NAME)
      )) {
        values.add(
            new Object[]{
                new IngestSegmentFirehoseFactory(
                    DATA_SOURCE_NAME,
                    FOREVER,
                    new SelectorDimFilter(DIM_NAME, DIM_VALUE, null),
                    dim_names,
                    metric_names,
                    Guice.createInjector(
                        new Module()
                        {
                          @Override
                          public void configure(Binder binder)
                          {
                            binder.bind(TaskToolboxFactory.class).toInstance(taskToolboxFactory);
                          }
                        }
                    ),
                    INDEX_IO
                ),
                String.format(
                    "DimNames[%s]MetricNames[%s]ParserDimNames[%s]",
                    dim_names == null ? "null" : "dims",
                    metric_names == null ? "null" : "metrics",
                    parser == ROW_PARSER ? "dims" : "null"
                ),
                parser
            }
        );
      }
    }
  }
  return values;
}
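For context, each Object[] above is a {factory, display name, parser} triple; name = "{1}" in @Parameterized.Parameters makes the JUnit runner use the second element, the "DimNames[...]MetricNames[...]ParserDimNames[...]" string, as the test name. A constructor matching that layout would look roughly like the sketch below (field names are illustrative, not copied from the test):

// Sketch of a constructor matching the Object[] layout produced by constructorFeeder().
// Field names are illustrative; only the parameter order is dictated by the factory above.
public IngestSegmentFirehoseFactoryTest(
    IngestSegmentFirehoseFactory factory,
    String testName,        // element {1}: used by the runner as the test's display name
    InputRowParser rowParser
)
{
  this.factory = factory;
  this.rowParser = rowParser;
}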
Use of io.druid.segment.incremental.OnheapIncrementalIndex in project druid by druid-io.
Class IngestSegmentFirehoseFactoryTimelineTest, method persist — fills an OnheapIncrementalIndex with the given rows, persists it, and returns a local-storage loadSpec pointing at the persisted directory:
private static Map<String, Object> persist(File tmpDir, InputRow... rows)
{
  final File persistDir = new File(tmpDir, UUID.randomUUID().toString());
  final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
      .withQueryGranularity(Granularities.NONE)
      .withMinTimestamp(JodaUtils.MIN_INSTANT)
      .withDimensionsSpec(ROW_PARSER)
      .withMetrics(new AggregatorFactory[]{new LongSumAggregatorFactory(METRICS[0], METRICS[0])})
      .build();
  final OnheapIncrementalIndex index = new OnheapIncrementalIndex(schema, true, rows.length);

  for (InputRow row : rows) {
    try {
      index.add(row);
    } catch (IndexSizeExceededException e) {
      throw Throwables.propagate(e);
    }
  }

  try {
    INDEX_MERGER.persist(index, persistDir, new IndexSpec());
  } catch (IOException e) {
    throw Throwables.propagate(e);
  }

  return ImmutableMap.<String, Object>of("type", "local", "path", persistDir.getAbsolutePath());
}
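The map returned here is a local-storage loadSpec. In the timeline test it ends up as the loadSpec of a DataSegment; a rough sketch of that wiring, assuming the DataSegment.builder() API of this Druid era and using illustrative data-source, interval, version, shard-spec, and size values, is:

// Sketch only: wrapping the loadSpec returned by persist(...) in a DataSegment.
// dataSource, interval, version, shardSpec, and size are illustrative, not taken from the test.
final Map<String, Object> loadSpec = persist(tmpDir, rows);
final DataSegment segment = DataSegment.builder()
    .dataSource("test_ds")
    .interval(new Interval("2000/2001"))
    .version("v1")
    .shardSpec(NoneShardSpec.instance())
    .loadSpec(loadSpec)  // {"type": "local", "path": <persistDir>}
    .size(0)
    .build();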
Use of io.druid.segment.incremental.OnheapIncrementalIndex in project druid by druid-io.
Class SearchQueryRunnerTest, method testSearchWithNullValueInDimension — indexes two rows with different dimension sets and verifies that a search over the "table" dimension reports the missing value as an empty-string hit:
@Test
public void testSearchWithNullValueInDimension() throws Exception
{
  IncrementalIndex<Aggregator> index = new OnheapIncrementalIndex(
      new IncrementalIndexSchema.Builder()
          .withQueryGranularity(Granularities.NONE)
          .withMinTimestamp(new DateTime("2011-01-12T00:00:00.000Z").getMillis())
          .build(),
      true,
      10
  );
  index.add(
      new MapBasedInputRow(
          1481871600000L,
          Arrays.asList("name", "host"),
          ImmutableMap.<String, Object>of("name", "name1", "host", "host")
      )
  );
  index.add(
      new MapBasedInputRow(
          1481871670000L,
          Arrays.asList("name", "table"),
          ImmutableMap.<String, Object>of("name", "name2", "table", "table")
      )
  );

  SearchQuery searchQuery = Druids.newSearchQueryBuilder()
      .dimensions(new DefaultDimensionSpec("table", "table"))
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(QueryRunnerTestHelper.allGran)
      .intervals(QueryRunnerTestHelper.fullOnInterval)
      .context(ImmutableMap.<String, Object>of("searchStrategy", "cursorOnly"))
      .build();

  QueryRunnerFactory factory = new SearchQueryRunnerFactory(
      selector,
      toolChest,
      QueryRunnerTestHelper.NOOP_QUERYWATCHER
  );
  QueryRunner runner = factory.createRunner(
      new QueryableIndexSegment("asdf", TestIndex.persistRealtimeAndLoadMMapped(index))
  );

  List<SearchHit> expectedHits = Lists.newLinkedList();
  expectedHits.add(new SearchHit("table", "table", 1));
  expectedHits.add(new SearchHit("table", "", 1));
  checkSearchQuery(searchQuery, runner, expectedHits);
}
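The second expected hit, SearchHit("table", "", 1), comes from the first row: it has no "table" dimension at all, so its value for that dimension is null and the search reports it as an empty string. A hedged variation on the same check, looping over the strategy names assumed to be available in this Druid era ("cursorOnly", "useIndexes", "auto") and reusing the runner and expected hits built above, might look like:

// Sketch: force the query down each search strategy via the query context and
// expect the same hits. The strategy names are an assumption about this Druid version.
for (String strategy : ImmutableList.of("cursorOnly", "useIndexes", "auto")) {
  SearchQuery query = Druids.newSearchQueryBuilder()
      .dimensions(new DefaultDimensionSpec("table", "table"))
      .dataSource(QueryRunnerTestHelper.dataSource)
      .granularity(QueryRunnerTestHelper.allGran)
      .intervals(QueryRunnerTestHelper.fullOnInterval)
      .context(ImmutableMap.<String, Object>of("searchStrategy", strategy))
      .build();
  // Rows without the "table" dimension surface as the empty-string hit above.
  checkSearchQuery(query, runner, expectedHits);
}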
Use of io.druid.segment.incremental.OnheapIncrementalIndex in project druid by druid-io.
Class EmptyIndexTest, method testEmptyIndex — merges an empty OnheapIncrementalIndex to disk and asserts that the resulting QueryableIndex has no dimensions, no metrics, and an empty time column:
@Test
public void testEmptyIndex() throws Exception
{
  File tmpDir = File.createTempFile("emptyIndex", "");
  if (!tmpDir.delete()) {
    throw new IllegalStateException("tmp delete failed");
  }
  if (!tmpDir.mkdir()) {
    throw new IllegalStateException("tmp mkdir failed");
  }

  try {
    IncrementalIndex emptyIndex = new OnheapIncrementalIndex(0, Granularities.NONE, new AggregatorFactory[0], 1000);
    IncrementalIndexAdapter emptyIndexAdapter = new IncrementalIndexAdapter(
        new Interval("2012-08-01/P3D"),
        emptyIndex,
        new ConciseBitmapFactory()
    );
    TestHelper.getTestIndexMerger().merge(
        Lists.<IndexableAdapter>newArrayList(emptyIndexAdapter),
        true,
        new AggregatorFactory[0],
        tmpDir,
        new IndexSpec()
    );

    QueryableIndex emptyQueryableIndex = TestHelper.getTestIndexIO().loadIndex(tmpDir);

    Assert.assertEquals("getDimensionNames", 0, Iterables.size(emptyQueryableIndex.getAvailableDimensions()));
    Assert.assertEquals("getMetricNames", 0, Iterables.size(emptyQueryableIndex.getColumnNames()));
    Assert.assertEquals("getDataInterval", new Interval("2012-08-01/P3D"), emptyQueryableIndex.getDataInterval());
    Assert.assertEquals(
        "getReadOnlyTimestamps",
        0,
        emptyQueryableIndex.getColumn(Column.TIME_COLUMN_NAME).getLength()
    );
  } finally {
    FileUtils.deleteDirectory(tmpDir);
  }
}
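Two OnheapIncrementalIndex constructors appear in this listing: the schema-based form used by the firehose tests and IndexBuilder, and the shorthand (minTimestamp, query granularity, metrics, maxRowCount) form used here. A side-by-side sketch, assuming the same Druid version as these tests (the count metric is illustrative):

// 1) Schema-based form, as in the firehose tests and IndexBuilder:
OnheapIncrementalIndex fromSchema = new OnheapIncrementalIndex(
    new IncrementalIndexSchema.Builder()
        .withMinTimestamp(0L)
        .withQueryGranularity(Granularities.NONE)
        .withMetrics(new AggregatorFactory[]{new CountAggregatorFactory("count")}) // illustrative
        .build(),
    true,  // deserializeComplexMetrics
    1000   // maxRowCount
);

// 2) Shorthand form, as in testEmptyIndex: minTimestamp, query granularity, metrics, maxRowCount.
OnheapIncrementalIndex shorthand = new OnheapIncrementalIndex(0L, Granularities.NONE, new AggregatorFactory[0], 1000);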
Use of io.druid.segment.incremental.OnheapIncrementalIndex in project druid by druid-io.
Class IndexBuilder, method buildIncrementalIndexWithRows — builds an OnheapIncrementalIndex from a schema and adds the given rows, rethrowing IndexSizeExceededException as unchecked:
private static IncrementalIndex buildIncrementalIndexWithRows(
    IncrementalIndexSchema schema,
    int maxRows,
    Iterable<InputRow> rows
)
{
  Preconditions.checkNotNull(schema, "schema");
  final IncrementalIndex incrementalIndex = new OnheapIncrementalIndex(schema, true, maxRows);
  for (InputRow row : rows) {
    try {
      incrementalIndex.add(row);
    } catch (IndexSizeExceededException e) {
      throw Throwables.propagate(e);
    }
  }
  return incrementalIndex;
}
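A hypothetical call site for the helper above (the schema, dimension, and row values are illustrative); MapBasedInputRow takes a timestamp in millis, the list of dimension names, and the event map:

// Illustrative usage of buildIncrementalIndexWithRows(); schema and rows are made up.
IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
    .withQueryGranularity(Granularities.NONE)
    .withMetrics(new AggregatorFactory[]{new CountAggregatorFactory("count")})
    .build();

List<InputRow> rows = ImmutableList.<InputRow>of(
    new MapBasedInputRow(
        new DateTime("2017-01-01").getMillis(),
        ImmutableList.of("dim1"),
        ImmutableMap.<String, Object>of("dim1", "value1")
    )
);

IncrementalIndex index = buildIncrementalIndexWithRows(schema, 1000, rows);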