Use of org.apache.druid.indexing.common.task.CompactionTask.SegmentProvider in project druid by druid-io.
From the class CompactionTaskTest, method testGranularitySpecWithNotNullRollup:
@Test
public void testGranularitySpecWithNotNullRollup() throws IOException, SegmentLoadingException
{
  // Granularity spec that leaves segment and query granularity null but forces rollup on.
  final List<ParallelIndexIngestionSpec> ingestionSpecs = CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      null,
      null,
      null,
      new ClientCompactionTaskGranularitySpec(null, null, true),
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
  Assert.assertEquals(6, ingestionSpecs.size());
  // The rollup flag must propagate into every generated data schema.
  for (ParallelIndexIngestionSpec indexIngestionSpec : ingestionSpecs) {
    Assert.assertTrue(indexIngestionSpec.getDataSchema().getGranularitySpec().isRollup());
  }
}
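Every test on this page repeats the same twelve-argument createIngestionSchema call and varies only one or two arguments. As a sketch (the helper name createSchemaWith is ours, not Druid's, and it assumes the same test fixtures such as toolbox, TUNING_CONFIG, COORDINATOR_CLIENT, and RETRY_POLICY_FACTORY are in scope inside the test class), the shared call could be factored out like this:

// Hypothetical helper, not part of Druid: wraps the repeated
// createIngestionSchema call so each test supplies only what it varies.
private List<ParallelIndexIngestionSpec> createSchemaWith(
    DimensionsSpec dimensionsSpec,
    ClientCompactionTaskGranularitySpec granularitySpec
) throws IOException, SegmentLoadingException
{
  return CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      dimensionsSpec,
      null,               // remaining nullable spec arguments, unset in these tests
      null,
      granularitySpec,
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
}

With that in place, the call in the test above would reduce to createSchemaWith(null, new ClientCompactionTaskGranularitySpec(null, null, true)).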
From the class CompactionTaskTest, method testQueryGranularityAndSegmentGranularityNonNull:
@Test
public void testQueryGranularityAndSegmentGranularityNonNull() throws IOException, SegmentLoadingException
{
  // Both segment granularity and query granularity are set to 3-month periods.
  final List<ParallelIndexIngestionSpec> ingestionSpecs = CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      null,
      null,
      null,
      new ClientCompactionTaskGranularitySpec(
          new PeriodGranularity(Period.months(3), null, null),
          new PeriodGranularity(Period.months(3), null, null),
          null
      ),
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
  final List<DimensionsSpec> expectedDimensionsSpec = ImmutableList.of(
      new DimensionsSpec(getDimensionSchema(new DoubleDimensionSchema("string_to_double")))
  );
  ingestionSpecs.sort(
      (s1, s2) -> Comparators.intervalsByStartThenEnd().compare(
          s1.getDataSchema().getGranularitySpec().inputIntervals().get(0),
          s2.getDataSchema().getGranularitySpec().inputIntervals().get(0)
      )
  );
  // The 3-month segment granularity covers the whole compaction interval,
  // so a single ingestion spec is produced.
  Assert.assertEquals(1, ingestionSpecs.size());
  assertIngestionSchema(
      ingestionSpecs,
      expectedDimensionsSpec,
      AGGREGATORS.stream().map(AggregatorFactory::getCombiningFactory).collect(Collectors.toList()),
      Collections.singletonList(COMPACTION_INTERVAL),
      new PeriodGranularity(Period.months(3), null, null),
      new PeriodGranularity(Period.months(3), null, null),
      IOConfig.DEFAULT_DROP_EXISTING
  );
}
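For context, new PeriodGranularity(Period.months(3), null, null) describes quarterly buckets; the two nulls are the origin and time zone, which fall back to their defaults. A minimal sketch, using only classes the test already imports, showing that the segment and query granularity arguments above are interchangeable values (this leans on PeriodGranularity's value-based equality, which the assertIngestionSchema comparisons in these tests already rely on):

// Quarterly (3-month) granularity with default origin and time zone.
final PeriodGranularity quarterly = new PeriodGranularity(Period.months(3), null, null);
// Two identically configured instances compare equal, which is what lets the
// test assert against freshly constructed expected granularities.
Assert.assertEquals(quarterly, new PeriodGranularity(Period.months(3), null, null));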
From the class CompactionTaskTest, method testMissingMetadata:
@Test
public void testMissingMetadata() throws IOException, SegmentLoadingException
{
  expectedException.expect(RuntimeException.class);
  expectedException.expectMessage(CoreMatchers.startsWith("Index metadata doesn't exist for segment"));
  // Remove the metadata of one queryable index so that schema creation fails.
  final TestIndexIO indexIO = (TestIndexIO) toolbox.getIndexIO();
  indexIO.removeMetadata(Iterables.getFirst(indexIO.getQueryableIndexMap().keySet(), null));
  final List<DataSegment> segments = new ArrayList<>(SEGMENTS);
  CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, SpecificSegmentsSpec.fromSegments(segments)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      null,
      null,
      null,
      null,
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
}
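The expect()/expectMessage() calls above rely on JUnit 4's ExpectedException rule, which the test class is assumed to declare as a public field along these lines:

import org.junit.Rule;
import org.junit.rules.ExpectedException;

// JUnit 4 rule backing the expect()/expectMessage() calls; the test passes
// only if the method body throws a matching exception after they are set.
@Rule
public ExpectedException expectedException = ExpectedException.none();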
From the class CompactionTaskTest, method testCreateIngestionSchemaWithCustomDimensionsSpec:
@Test
public void testCreateIngestionSchemaWithCustomDimensionsSpec() throws IOException, SegmentLoadingException
{
  // A caller-supplied DimensionsSpec covering every column type in the test segments.
  final DimensionsSpec customSpec = new DimensionsSpec(
      Lists.newArrayList(
          new LongDimensionSchema("timestamp"),
          new StringDimensionSchema("string_dim_0"),
          new StringDimensionSchema("string_dim_1"),
          new StringDimensionSchema("string_dim_2"),
          new StringDimensionSchema("string_dim_3"),
          new StringDimensionSchema("string_dim_4"),
          new LongDimensionSchema("long_dim_0"),
          new LongDimensionSchema("long_dim_1"),
          new LongDimensionSchema("long_dim_2"),
          new LongDimensionSchema("long_dim_3"),
          new LongDimensionSchema("long_dim_4"),
          new FloatDimensionSchema("float_dim_0"),
          new FloatDimensionSchema("float_dim_1"),
          new FloatDimensionSchema("float_dim_2"),
          new FloatDimensionSchema("float_dim_3"),
          new FloatDimensionSchema("float_dim_4"),
          new DoubleDimensionSchema("double_dim_0"),
          new DoubleDimensionSchema("double_dim_1"),
          new DoubleDimensionSchema("double_dim_2"),
          new DoubleDimensionSchema("double_dim_3"),
          new DoubleDimensionSchema("double_dim_4"),
          new StringDimensionSchema(MIXED_TYPE_COLUMN)
      )
  );
  final List<ParallelIndexIngestionSpec> ingestionSpecs = CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      customSpec,
      null,
      null,
      null,
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
  ingestionSpecs.sort(
      (s1, s2) -> Comparators.intervalsByStartThenEnd().compare(
          s1.getDataSchema().getGranularitySpec().inputIntervals().get(0),
          s2.getDataSchema().getGranularitySpec().inputIntervals().get(0)
      )
  );
  Assert.assertEquals(6, ingestionSpecs.size());
  // Each of the six ingestion specs must use the custom spec verbatim.
  final List<DimensionsSpec> dimensionsSpecs = new ArrayList<>(6);
  IntStream.range(0, 6).forEach(i -> dimensionsSpecs.add(customSpec));
  assertIngestionSchema(
      ingestionSpecs,
      dimensionsSpecs,
      AGGREGATORS.stream().map(AggregatorFactory::getCombiningFactory).collect(Collectors.toList()),
      SEGMENT_INTERVALS,
      Granularities.MONTH,
      Granularities.NONE,
      IOConfig.DEFAULT_DROP_EXISTING
  );
}
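Since the dimension names follow a strict name_<n> pattern, an equivalent way to build the same 22-entry list (a sketch of ours, not how the Druid test is actually written) is:

// Builds the same 22 dimension schemas as the hand-written list above,
// in the same order: timestamp, strings 0-4, longs 0-4, floats 0-4,
// doubles 0-4, then the mixed-type column.
final List<DimensionSchema> dims = new ArrayList<>();
dims.add(new LongDimensionSchema("timestamp"));
for (int i = 0; i < 5; i++) {
  dims.add(new StringDimensionSchema("string_dim_" + i));
}
for (int i = 0; i < 5; i++) {
  dims.add(new LongDimensionSchema("long_dim_" + i));
}
for (int i = 0; i < 5; i++) {
  dims.add(new FloatDimensionSchema("float_dim_" + i));
}
for (int i = 0; i < 5; i++) {
  dims.add(new DoubleDimensionSchema("double_dim_" + i));
}
dims.add(new StringDimensionSchema(MIXED_TYPE_COLUMN));
final DimensionsSpec customSpec = new DimensionsSpec(dims);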
From the class CompactionTaskTest, method testNullGranularitySpec:
@Test
public void testNullGranularitySpec() throws IOException, SegmentLoadingException
{
  // With no granularity spec supplied, the generated schema falls back to the
  // segments' original granularities (MONTH segment granularity, NONE query
  // granularity), as the final assertion checks.
  final List<ParallelIndexIngestionSpec> ingestionSpecs = CompactionTask.createIngestionSchema(
      toolbox,
      LockGranularity.TIME_CHUNK,
      new SegmentProvider(DATA_SOURCE, new CompactionIntervalSpec(COMPACTION_INTERVAL, null)),
      new PartitionConfigurationManager(TUNING_CONFIG),
      null,
      null,
      null,
      null,
      COORDINATOR_CLIENT,
      segmentCacheManagerFactory,
      RETRY_POLICY_FACTORY,
      IOConfig.DEFAULT_DROP_EXISTING
  );
  final List<DimensionsSpec> expectedDimensionsSpec = getExpectedDimensionsSpecForAutoGeneration();
  ingestionSpecs.sort(
      (s1, s2) -> Comparators.intervalsByStartThenEnd().compare(
          s1.getDataSchema().getGranularitySpec().inputIntervals().get(0),
          s2.getDataSchema().getGranularitySpec().inputIntervals().get(0)
      )
  );
  Assert.assertEquals(6, ingestionSpecs.size());
  assertIngestionSchema(
      ingestionSpecs,
      expectedDimensionsSpec,
      AGGREGATORS.stream().map(AggregatorFactory::getCombiningFactory).collect(Collectors.toList()),
      SEGMENT_INTERVALS,
      Granularities.MONTH,
      Granularities.NONE,
      IOConfig.DEFAULT_DROP_EXISTING
  );
}
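The interval-sorting lambda appears verbatim in three of the tests above. As a final sketch (the constant name is ours, not Druid's, and it assumes java.util.Comparator is imported), it could be lifted into a shared comparator:

// Hypothetical shared comparator, not in the Druid test: orders ingestion
// specs by the start, then end, of their first input interval.
private static final Comparator<ParallelIndexIngestionSpec> BY_FIRST_INPUT_INTERVAL =
    (s1, s2) -> Comparators.intervalsByStartThenEnd().compare(
        s1.getDataSchema().getGranularitySpec().inputIntervals().get(0),
        s2.getDataSchema().getGranularitySpec().inputIntervals().get(0)
    );

Each test could then sort with ingestionSpecs.sort(BY_FIRST_INPUT_INTERVAL).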