Use of io.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From class QuantileSqlAggregatorTest, method setUp:
@Before
public void setUp() throws Exception
{
  Calcites.setSystemProperties();

  // Note: this is needed in order to properly register the serde for Histogram.
  new ApproximateHistogramDruidModule().configure(null);

  final QueryableIndex index = IndexBuilder
      .create()
      .tmpDir(temporaryFolder.newFolder())
      .indexMerger(TestHelper.getTestIndexMergerV9())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new AggregatorFactory[]{
                      new CountAggregatorFactory("cnt"),
                      new DoubleSumAggregatorFactory("m1", "m1"),
                      new ApproximateHistogramAggregatorFactory("hist_m1", "m1", null, null, null, null)
                  }
              )
              .withRollup(false)
              .build()
      )
      .rows(CalciteTests.ROWS1)
      .buildMMappedIndex();

  walker = new SpecificSegmentsQuerySegmentWalker(CalciteTests.queryRunnerFactoryConglomerate()).add(
      DataSegment.builder()
                 .dataSource(DATA_SOURCE)
                 .interval(index.getDataInterval())
                 .version("1")
                 .shardSpec(new LinearShardSpec(0))
                 .build(),
      index
  );

  final PlannerConfig plannerConfig = new PlannerConfig();
  final SchemaPlus rootSchema = Calcites.createRootSchema(CalciteTests.createMockSchema(walker, plannerConfig));
  final DruidOperatorTable operatorTable = new DruidOperatorTable(
      ImmutableSet.<SqlAggregator>of(new QuantileSqlAggregator()),
      ImmutableSet.<SqlExtractionOperator>of()
  );
  plannerFactory = new PlannerFactory(rootSchema, walker, operatorTable, plannerConfig);
}
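A LinearShardSpec carries nothing but its partition number, which is why these tests can build one from a bare integer. The sketch below shows how it would round-trip through Druid's JSON handling; DefaultObjectMapper and the exact JSON shape in the comment are assumptions based on the "linear" shard spec type, not something this test exercises.

import com.fasterxml.jackson.databind.ObjectMapper;
import io.druid.jackson.DefaultObjectMapper;
import io.druid.timeline.partition.LinearShardSpec;
import io.druid.timeline.partition.ShardSpec;

public class LinearShardSpecJsonSketch
{
  public static void main(String[] args) throws Exception
  {
    // A Druid-configured mapper; ShardSpec's own annotations drive the
    // polymorphic "type" field.
    ObjectMapper mapper = new DefaultObjectMapper();
    String json = mapper.writeValueAsString(new LinearShardSpec(0));
    // Expected shape (assumption): {"type":"linear","partitionNum":0}
    ShardSpec roundTripped = mapper.readValue(json, ShardSpec.class);
    System.out.println(json + " -> partition " + roundTripped.getPartitionNum());
  }
}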
Use of io.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From class DefaultOfflineAppenderatorFactoryTest, method testBuild:
@Test
public void testBuild() throws IOException, SegmentNotWritableException
{
  Injector injector = Initialization.makeInjectorWithModules(
      GuiceInjectors.makeStartupInjector(),
      ImmutableList.<Module>of(
          new Module()
          {
            @Override
            public void configure(Binder binder)
            {
              binder.bindConstant().annotatedWith(Names.named("serviceName")).to("druid/tool");
              binder.bindConstant().annotatedWith(Names.named("servicePort")).to(9999);
              binder.bind(DruidProcessingConfig.class).toInstance(
                  new DruidProcessingConfig()
                  {
                    @Override
                    public String getFormatString()
                    {
                      return "processing-%s";
                    }

                    @Override
                    public int intermediateComputeSizeBytes()
                    {
                      return 100 * 1024 * 1024;
                    }

                    @Override
                    public int getNumThreads()
                    {
                      return 1;
                    }

                    @Override
                    public int columnCacheSizeBytes()
                    {
                      return 25 * 1024 * 1024;
                    }
                  }
              );
              binder.bind(ColumnConfig.class).to(DruidProcessingConfig.class);
            }
          }
      )
  );
  ObjectMapper objectMapper = injector.getInstance(ObjectMapper.class);
  AppenderatorFactory defaultOfflineAppenderatorFactory =
      objectMapper.reader(AppenderatorFactory.class).readValue("{\"type\":\"offline\"}");
  final Map<String, Object> parserMap = objectMapper.convertValue(
      new MapInputRowParser(
          new JSONParseSpec(
              new TimestampSpec("ts", "auto", null),
              new DimensionsSpec(null, null, null),
              null,
              null
          )
      ),
      Map.class
  );
  DataSchema schema = new DataSchema(
      "dataSourceName",
      parserMap,
      new AggregatorFactory[]{
          new CountAggregatorFactory("count"),
          new LongSumAggregatorFactory("met", "met")
      },
      new UniformGranularitySpec(Granularities.MINUTE, Granularities.NONE, null),
      objectMapper
  );
  RealtimeTuningConfig tuningConfig = new RealtimeTuningConfig(
      75000, null, null, temporaryFolder.newFolder(), null, null, null, null, null, null, 0, 0, null, null
  );
  try (Appenderator appenderator = defaultOfflineAppenderatorFactory.build(schema, tuningConfig, new FireDepartmentMetrics())) {
    Assert.assertEquals("dataSourceName", appenderator.getDataSource());
    Assert.assertEquals(null, appenderator.startJob());
    SegmentIdentifier identifier = new SegmentIdentifier(
        "dataSourceName",
        new Interval("2000/2001"),
        "A",
        new LinearShardSpec(0)
    );
    Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
    appenderator.add(identifier, AppenderatorTest.IR("2000", "bar", 1), Suppliers.ofInstance(Committers.nil()));
    Assert.assertEquals(1, ((AppenderatorImpl) appenderator).getRowsInMemory());
    appenderator.add(identifier, AppenderatorTest.IR("2000", "baz", 1), Suppliers.ofInstance(Committers.nil()));
    Assert.assertEquals(2, ((AppenderatorImpl) appenderator).getRowsInMemory());
    appenderator.close();
    Assert.assertEquals(0, ((AppenderatorImpl) appenderator).getRowsInMemory());
  }
}
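Both add() calls above pass the same SegmentIdentifier, so the two rows land in one in-memory segment and getRowsInMemory() climbs from 0 to 2. With linear sharding, a second segment in the same interval is just the next partition number; a hypothetical extension of the test (not part of it) would look like:

// Hypothetical second identifier alongside partition 0, same interval and version:
SegmentIdentifier second = new SegmentIdentifier(
    "dataSourceName",
    new Interval("2000/2001"),
    "A",
    new LinearShardSpec(1)
);
// Rows added under `second` would accumulate separately from `identifier`:
// appenderator.add(second, AppenderatorTest.IR("2000", "qux", 1), Suppliers.ofInstance(Committers.nil()));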
Use of io.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From class ActionBasedUsedSegmentCheckerTest, method testBasic:
@Test
public void testBasic() throws IOException
{
  final TaskActionClient taskActionClient = EasyMock.createMock(TaskActionClient.class);
  EasyMock.expect(
      taskActionClient.submit(new SegmentListUsedAction("bar", null, ImmutableList.of(new Interval("2002/P1D"))))
  ).andReturn(
      ImmutableList.of(
          DataSegment.builder().dataSource("bar").interval(new Interval("2002/P1D")).shardSpec(new LinearShardSpec(0)).version("b").build(),
          DataSegment.builder().dataSource("bar").interval(new Interval("2002/P1D")).shardSpec(new LinearShardSpec(1)).version("b").build()
      )
  );
  EasyMock.expect(
      taskActionClient.submit(new SegmentListUsedAction("foo", null, ImmutableList.of(new Interval("2000/P1D"), new Interval("2001/P1D"))))
  ).andReturn(
      ImmutableList.of(
          DataSegment.builder().dataSource("foo").interval(new Interval("2000/P1D")).shardSpec(new LinearShardSpec(0)).version("a").build(),
          DataSegment.builder().dataSource("foo").interval(new Interval("2000/P1D")).shardSpec(new LinearShardSpec(1)).version("a").build(),
          DataSegment.builder().dataSource("foo").interval(new Interval("2001/P1D")).shardSpec(new LinearShardSpec(1)).version("b").build(),
          DataSegment.builder().dataSource("foo").interval(new Interval("2002/P1D")).shardSpec(new LinearShardSpec(1)).version("b").build()
      )
  );
  EasyMock.replay(taskActionClient);
  final UsedSegmentChecker checker = new ActionBasedUsedSegmentChecker(taskActionClient);
  final Set<DataSegment> segments = checker.findUsedSegments(
      ImmutableSet.of(
          new SegmentIdentifier("foo", new Interval("2000/P1D"), "a", new LinearShardSpec(1)),
          new SegmentIdentifier("foo", new Interval("2001/P1D"), "b", new LinearShardSpec(0)),
          new SegmentIdentifier("bar", new Interval("2002/P1D"), "b", new LinearShardSpec(0))
      )
  );
  Assert.assertEquals(
      ImmutableSet.of(
          DataSegment.builder().dataSource("foo").interval(new Interval("2000/P1D")).shardSpec(new LinearShardSpec(1)).version("a").build(),
          DataSegment.builder().dataSource("bar").interval(new Interval("2002/P1D")).shardSpec(new LinearShardSpec(0)).version("b").build()
      ),
      segments
  );
  EasyMock.verify(taskActionClient);
}
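The assertions imply that an identifier counts as "used" only when a published segment matches it on all four coordinates: foo/2000/a at partition 1 and bar/2002/b at partition 0 match, while foo/2001/b at partition 0 does not, because only partition 1 was returned for that interval. A conceptual sketch of that matching rule, inferred from the assertions rather than lifted from the checker's source:

// Conceptual only: names the four coordinates that must all line up for a
// SegmentIdentifier to be considered covered by a published DataSegment.
static boolean matches(SegmentIdentifier id, DataSegment segment)
{
  return id.getDataSource().equals(segment.getDataSource())
      && id.getInterval().equals(segment.getInterval())
      && id.getVersion().equals(segment.getVersion())
      && id.getShardSpec().getPartitionNum() == segment.getShardSpec().getPartitionNum();
}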
Use of io.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From class SegmentAllocateActionTest, method testAddToExistingLinearShardSpecsSameGranularity:
@Test
public void testAddToExistingLinearShardSpecsSameGranularity() throws Exception
{
  final Task task = new NoopTask(null, 0, 0, null, null, null);
  taskActionTestKit.getMetadataStorageCoordinator().announceHistoricalSegments(
      ImmutableSet.of(
          DataSegment.builder()
                     .dataSource(DATA_SOURCE)
                     .interval(Granularities.HOUR.bucket(PARTY_TIME))
                     .version(PARTY_TIME.toString())
                     .shardSpec(new LinearShardSpec(0))
                     .build(),
          DataSegment.builder()
                     .dataSource(DATA_SOURCE)
                     .interval(Granularities.HOUR.bucket(PARTY_TIME))
                     .version(PARTY_TIME.toString())
                     .shardSpec(new LinearShardSpec(1))
                     .build()
      )
  );
  taskActionTestKit.getTaskLockbox().add(task);
  final SegmentIdentifier id1 = allocate(task, PARTY_TIME, Granularities.NONE, Granularities.HOUR, "s1", null);
  final SegmentIdentifier id2 = allocate(task, PARTY_TIME, Granularities.NONE, Granularities.HOUR, "s1", id1.getIdentifierAsString());
  assertSameIdentifier(
      id1,
      new SegmentIdentifier(DATA_SOURCE, Granularities.HOUR.bucket(PARTY_TIME), PARTY_TIME.toString(), new LinearShardSpec(2))
  );
  assertSameIdentifier(
      id2,
      new SegmentIdentifier(DATA_SOURCE, Granularities.HOUR.bucket(PARTY_TIME), PARTY_TIME.toString(), new LinearShardSpec(3))
  );
}
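The two announced segments occupy linear partitions 0 and 1, so the allocator hands out partitions 2 and 3 next. A sketch of the arithmetic; the "highest existing partition plus one" rule is inferred from the assertions above, not quoted from the allocator's implementation:

// Partitions already committed for this (dataSource, interval, version): 0 and 1.
int maxExisting = 1;
LinearShardSpec next = new LinearShardSpec(maxExisting + 1);  // partition 2, matches id1
LinearShardSpec after = new LinearShardSpec(maxExisting + 2); // partition 3, matches id2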
Use of io.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
From class DruidSchemaTest, method setUp:
@Before
public void setUp() throws Exception
{
  Calcites.setSystemProperties();
  final File tmpDir = temporaryFolder.newFolder();
  final QueryableIndex index1 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "1"))
      .indexMerger(TestHelper.getTestIndexMergerV9())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(
                  new AggregatorFactory[]{
                      new CountAggregatorFactory("cnt"),
                      new DoubleSumAggregatorFactory("m1", "m1"),
                      new HyperUniquesAggregatorFactory("unique_dim1", "dim1")
                  }
              )
              .withRollup(false)
              .build()
      )
      .rows(ROWS1)
      .buildMMappedIndex();
  final QueryableIndex index2 = IndexBuilder
      .create()
      .tmpDir(new File(tmpDir, "2"))
      .indexMerger(TestHelper.getTestIndexMergerV9())
      .schema(
          new IncrementalIndexSchema.Builder()
              .withMetrics(new AggregatorFactory[]{new LongSumAggregatorFactory("m1", "m1")})
              .withRollup(false)
              .build()
      )
      .rows(ROWS2)
      .buildMMappedIndex();
  walker = new SpecificSegmentsQuerySegmentWalker(CalciteTests.queryRunnerFactoryConglomerate())
      .add(
          DataSegment.builder()
                     .dataSource(CalciteTests.DATASOURCE1)
                     .interval(new Interval("2000/P1Y"))
                     .version("1")
                     .shardSpec(new LinearShardSpec(0))
                     .build(),
          index1
      )
      .add(
          DataSegment.builder()
                     .dataSource(CalciteTests.DATASOURCE1)
                     .interval(new Interval("2001/P1Y"))
                     .version("1")
                     .shardSpec(new LinearShardSpec(0))
                     .build(),
          index2
      )
      .add(
          DataSegment.builder()
                     .dataSource(CalciteTests.DATASOURCE2)
                     .interval(index2.getDataInterval())
                     .version("1")
                     .shardSpec(new LinearShardSpec(0))
                     .build(),
          index2
      );
  schema = new DruidSchema(walker, new TestServerInventoryView(walker.getSegments()), PLANNER_CONFIG_DEFAULT);
  schema.start();
  schema.awaitInitialization();
}
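Every segment here uses LinearShardSpec(0), and none of them clash: a partition number is only meaningful within one (dataSource, interval, version) slot of the timeline. The comment sketch below restates the three slots this walker ends up serving; the tuple notation is illustrative, not a Druid type:

// (dataSource,               interval,                  version, partition) -> index
// (CalciteTests.DATASOURCE1, 2000/P1Y,                  "1",     0)         -> index1
// (CalciteTests.DATASOURCE1, 2001/P1Y,                  "1",     0)         -> index2
// (CalciteTests.DATASOURCE2, index2.getDataInterval(),  "1",     0)         -> index2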