Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From the class CalciteTests, method createMockWalker:
public static SpecificSegmentsQuerySegmentWalker createMockWalker(
    final QueryRunnerFactoryConglomerate conglomerate,
    final File tmpDir,
    final QueryScheduler scheduler,
    final JoinableFactory joinableFactory
) {
  final QueryableIndex index1 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "1"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA)
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex index2 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "2"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA_DIFFERENT_DIM3_M1_TYPES)
      .rows(ROWS2)
      .buildMMappedIndex();
  final QueryableIndex forbiddenIndex = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "forbidden"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA)
      .rows(FORBIDDEN_ROWS)
      .buildMMappedIndex();
  final QueryableIndex indexNumericDims = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "3"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA_NUMERIC_DIMS)
      .rows(ROWS1_WITH_NUMERIC_DIMS)
      .buildMMappedIndex();
  final QueryableIndex index4 = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "4"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA)
      .rows(ROWS1_WITH_FULL_TIMESTAMP)
      .buildMMappedIndex();
  final QueryableIndex indexLotsOfColumns = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "5"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA_LOTS_O_COLUMNS)
      .rows(ROWS_LOTS_OF_COLUMNS)
      .buildMMappedIndex();
  final QueryableIndex someDatasourceIndex = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "6"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA)
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex someXDatasourceIndex = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "7"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA_WITH_X_COLUMNS)
      .rows(RAW_ROWS1_X)
      .buildMMappedIndex();
  final QueryableIndex userVisitIndex = IndexBuilder.create()
      .tmpDir(new File(tmpDir, "8"))
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(INDEX_SCHEMA)
      .rows(USER_VISIT_ROWS)
      .buildMMappedIndex();
  return new SpecificSegmentsQuerySegmentWalker(
      conglomerate,
      INJECTOR.getInstance(LookupExtractorFactoryContainerProvider.class),
      joinableFactory,
      scheduler
  ).add(
      DataSegment.builder().dataSource(DATASOURCE1).interval(index1.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      index1
  ).add(
      DataSegment.builder().dataSource(DATASOURCE2).interval(index2.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      index2
  ).add(
      DataSegment.builder().dataSource(FORBIDDEN_DATASOURCE).interval(forbiddenIndex.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      forbiddenIndex
  ).add(
      DataSegment.builder().dataSource(DATASOURCE3).interval(indexNumericDims.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      indexNumericDims
  ).add(
      DataSegment.builder().dataSource(DATASOURCE4).interval(index4.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      index4
  ).add(
      DataSegment.builder().dataSource(DATASOURCE5).interval(indexLotsOfColumns.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      indexLotsOfColumns
  ).add(
      DataSegment.builder().dataSource(SOME_DATASOURCE).interval(indexLotsOfColumns.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      someDatasourceIndex
  ).add(
      DataSegment.builder().dataSource(SOMEXDATASOURCE).interval(indexLotsOfColumns.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      someXDatasourceIndex
  ).add(
      DataSegment.builder().dataSource(BROADCAST_DATASOURCE).interval(indexNumericDims.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      indexNumericDims
  ).add(
      DataSegment.builder().dataSource(USERVISITDATASOURCE).interval(userVisitIndex.getDataInterval())
          .version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      userVisitIndex
  );
}
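Every segment registered above follows the same pattern: a DataSegment built with partition number 0 of a LinearShardSpec, paired with its QueryableIndex. A minimal sketch of that recurring builder call, with a hypothetical datasource name "ds" and index variable standing in for the constants above:

import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.LinearShardSpec;

// Sketch only: "ds" and `index` are hypothetical stand-ins.
// LinearShardSpec takes just a partition number; linear partitions can be
// appended to an interval without declaring a total partition count up front.
DataSegment segment = DataSegment.builder()
    .dataSource("ds")                     // datasource the segment belongs to
    .interval(index.getDataInterval())    // time interval the index covers
    .version("1")                         // version string for the timeline
    .shardSpec(new LinearShardSpec(0))    // partition 0 within the interval
    .size(0)                              // size is irrelevant to the test walker
    .build();

LinearShardSpec suits test fixtures like this one precisely because it carries no partitioning dimensions or total-count bookkeeping.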
Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From the class DefaultOfflineAppenderatorFactoryTest, method testBuild:
@Test
public void testBuild() throws IOException, SegmentNotWritableException {
  // Stand up a minimal injector supplying the service constants and the
  // DruidProcessingConfig that the appenderator factory requires.
  Injector injector = Initialization.makeInjectorWithModules(
      GuiceInjectors.makeStartupInjector(),
      ImmutableList.<Module>of(new Module() {
        @Override
        public void configure(Binder binder) {
          binder.bindConstant().annotatedWith(Names.named("serviceName")).to("druid/tool");
          binder.bindConstant().annotatedWith(Names.named("servicePort")).to(9999);
          binder.bindConstant().annotatedWith(Names.named("tlsServicePort")).to(-1);
          binder.bind(DruidProcessingConfig.class).toInstance(new DruidProcessingConfig() {
            @Override
            public String getFormatString() {
              return "processing-%s";
            }

            @Override
            public int intermediateComputeSizeBytes() {
              return 100 * 1024 * 1024;
            }

            @Override
            public int getNumThreads() {
              return 1;
            }

            @Override
            public int columnCacheSizeBytes() {
              return 25 * 1024 * 1024;
            }
          });
          binder.bind(ColumnConfig.class).to(DruidProcessingConfig.class);
        }
      })
  );
  ObjectMapper objectMapper = injector.getInstance(ObjectMapper.class);
  // Deserialize the factory from JSON, as it would arrive from configuration.
  AppenderatorFactory defaultOfflineAppenderatorFactory =
      objectMapper.readerFor(AppenderatorFactory.class).readValue("{\"type\":\"offline\"}");
  final Map<String, Object> parserMap = objectMapper.convertValue(
      new MapInputRowParser(
          new JSONParseSpec(new TimestampSpec("ts", "auto", null), DimensionsSpec.EMPTY, null, null, null)
      ),
      Map.class
  );
  DataSchema schema = new DataSchema(
      "dataSourceName",
      parserMap,
      new AggregatorFactory[]{
          new CountAggregatorFactory("count"),
          new LongSumAggregatorFactory("met", "met")
      },
      new UniformGranularitySpec(Granularities.MINUTE, Granularities.NONE, null),
      null,
      objectMapper
  );
  RealtimeTuningConfig tuningConfig = new RealtimeTuningConfig(
      null, 75000, null, null, null, null, temporaryFolder.newFolder(),
      null, null, null, null, null, null, 0, 0, null, null, null, null, null
  );
  Appenderator appenderator = defaultOfflineAppenderatorFactory.build(
      schema, tuningConfig, new FireDepartmentMetrics());
  try {
    Assert.assertEquals("dataSourceName", appenderator.getDataSource());
    Assert.assertEquals(null, appenderator.startJob());
    SegmentIdWithShardSpec identifier = new SegmentIdWithShardSpec(
        "dataSourceName", Intervals.of("2000/2001"), "A", new LinearShardSpec(0));
    Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
    appenderator.add(identifier, StreamAppenderatorTest.ir("2000", "bar", 1), null);
    Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory());
    appenderator.add(identifier, StreamAppenderatorTest.ir("2000", "baz", 1), null);
    Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory());
    // Closing drops the buffered rows; the close() in the finally block is a
    // repeated-call safety net.
    appenderator.close();
    Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
  } finally {
    appenderator.close();
  }
}
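The SegmentIdWithShardSpec is where LinearShardSpec appears here: it routes the added rows to partition 0 of the 2000/2001 interval at version "A". If a second partition of the same interval were needed, only the partition number would change; a brief sketch (variable names are illustrative, not from the test):

// Two partitions of the same interval and version, distinguished only by
// the linear partition number.
SegmentIdWithShardSpec part0 = new SegmentIdWithShardSpec(
    "dataSourceName", Intervals.of("2000/2001"), "A", new LinearShardSpec(0));
SegmentIdWithShardSpec part1 = new SegmentIdWithShardSpec(
    "dataSourceName", Intervals.of("2000/2001"), "A", new LinearShardSpec(1));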