Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class ParallelIndexSupervisorTaskSerdeTest, method forceGuaranteedRollupWithSingleDimPartitionsValid.
@Test
public void forceGuaranteedRollupWithSingleDimPartitionsValid() {
  ParallelIndexSupervisorTask task = new ParallelIndexSupervisorTaskBuilder()
      .ingestionSpec(
          new ParallelIndexIngestionSpecBuilder()
              .forceGuaranteedRollup(true)
              .partitionsSpec(new SingleDimensionPartitionsSpec(1, null, "a", true))
              .inputIntervals(INTERVALS)
              .build()
      )
      .build();

  // The configured partitions spec survives the build and is exposed through the tuning config.
  PartitionsSpec partitionsSpec = task.getIngestionSchema().getTuningConfig().getPartitionsSpec();
  Assert.assertThat(partitionsSpec, CoreMatchers.instanceOf(SingleDimensionPartitionsSpec.class));
}
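The four constructor arguments above are targetRowsPerSegment, maxRowsPerSegment, partitionDimension, and assumeGrouped; exactly one of the two row limits may be non-null. A minimal standalone sketch of constructing the spec outside the test harness (the concrete values are illustrative, not taken from the test):

import org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec;

class SingleDimSpecSketch {
  public static void main(String[] args) {
    // Target ~1000 rows per segment, no hard max, range-partition on "a",
    // and do not assume the input is already grouped on the partition dimension.
    SingleDimensionPartitionsSpec spec = new SingleDimensionPartitionsSpec(1000, null, "a", false);
    System.out.println(spec);
  }
}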
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class ParallelIndexSupervisorTaskSerdeTest, method forceGuaranteedRollupWithSingleDimPartitionsMissingDimension.
@Test
public void forceGuaranteedRollupWithSingleDimPartitionsMissingDimension() {
  expectedException.expect(IllegalArgumentException.class);
  expectedException.expectMessage("partitionDimensions must be specified");

  new ParallelIndexSupervisorTaskBuilder()
      .ingestionSpec(
          new ParallelIndexIngestionSpecBuilder()
              .forceGuaranteedRollup(true)
              .partitionsSpec(new SingleDimensionPartitionsSpec(1, null, null, true))
              .inputIntervals(INTERVALS)
              .build()
      )
      .build();
}
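Because the enclosing class is a serde test, a valid spec like the one in the first example should also round-trip through Jackson. A hedged sketch of that round trip, assuming Druid's DefaultObjectMapper and the "single_dim" subtype registration on the PartitionsSpec interface:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.indexer.partitions.PartitionsSpec;
import org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec;
import org.apache.druid.jackson.DefaultObjectMapper;

class PartitionsSpecSerdeSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new DefaultObjectMapper();
    SingleDimensionPartitionsSpec original = new SingleDimensionPartitionsSpec(1, null, "a", true);

    // Serializes to something like {"type":"single_dim","targetRowsPerSegment":1,...}
    String json = mapper.writeValueAsString(original);
    PartitionsSpec roundTripped = mapper.readValue(json, PartitionsSpec.class);

    // The spec classes implement value-based equals(), so the round trip should be lossless.
    if (!original.equals(roundTripped)) {
      throw new AssertionError("serde round trip changed the spec");
    }
  }
}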
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class CompactionTaskParallelRunTest, method testRunParallelWithRangePartitioning.
@Test
public void testRunParallelWithRangePartitioning() throws Exception {
  // Range partitioning is not supported with segment lock yet
  Assume.assumeFalse(lockGranularity == LockGranularity.SEGMENT);

  runIndexTask(null, true);

  final Builder builder = new Builder(DATA_SOURCE, getSegmentCacheManagerFactory(), RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder
      .inputSpec(new CompactionIntervalSpec(INTERVAL_TO_INDEX, null))
      .tuningConfig(newTuningConfig(new SingleDimensionPartitionsSpec(7, null, "dim", false), 2, true))
      .build();

  final Set<DataSegment> compactedSegments = runTask(compactionTask);

  for (DataSegment segment : compactedSegments) {
    // Expect compaction state to exist on every segment, since storing compaction state is on by default
    Map<String, String> expectedLongSumMetric = new HashMap<>();
    expectedLongSumMetric.put("type", "longSum");
    expectedLongSumMetric.put("name", "val");
    expectedLongSumMetric.put("fieldName", "val");
    expectedLongSumMetric.put("expression", null);

    // Range partitioning must produce single-dimension shard specs.
    Assert.assertSame(SingleDimensionShardSpec.class, segment.getShardSpec().getClass());

    CompactionState expectedState = new CompactionState(
        new SingleDimensionPartitionsSpec(7, null, "dim", false),
        new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("ts", "dim"))),
        ImmutableList.of(expectedLongSumMetric),
        null,
        compactionTask.getTuningConfig().getIndexSpec().asMap(getObjectMapper()),
        getObjectMapper().readValue(
            getObjectMapper().writeValueAsString(
                new UniformGranularitySpec(
                    Granularities.HOUR,
                    Granularities.MINUTE,
                    true,
                    ImmutableList.of(segment.getInterval())
                )
            ),
            Map.class
        )
    );
    Assert.assertEquals(expectedState, segment.getLastCompactionState());
  }
}
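The expectedLongSumMetric map above is just the Map form of a longSum aggregator's JSON. For comparison, a sketch of the same metric built from the standard aggregator factory (deriving the Map via an ObjectMapper is an assumption about how the test arrived at its literal form):

import org.apache.druid.query.aggregation.LongSumAggregatorFactory;

class LongSumSketch {
  public static void main(String[] args) {
    // Equivalent to {"type":"longSum","name":"val","fieldName":"val","expression":null}:
    // sum the long-typed input column "val" into an output column also named "val".
    LongSumAggregatorFactory longSum = new LongSumAggregatorFactory("val", "val");
    System.out.println(longSum.getName()); // "val"
  }
}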
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class HadoopDruidIndexerConfigTest, method testGetTargetPartitionSizeWithSingleDimensionPartitionsTargetRowsPerSegment.
@Test
public void testGetTargetPartitionSizeWithSingleDimensionPartitionsTargetRowsPerSegment() {
  int targetRowsPerSegment = 123;
  SingleDimensionPartitionsSpec partitionsSpec =
      new SingleDimensionPartitionsSpec(targetRowsPerSegment, null, null, false);
  HadoopIngestionSpec spec = new HadoopIngestionSpecBuilder().partitionsSpec(partitionsSpec).build();
  HadoopDruidIndexerConfig config = new HadoopDruidIndexerConfig(spec);

  int targetPartitionSize = config.getTargetPartitionSize();

  Assert.assertEquals(targetRowsPerSegment, targetPartitionSize);
}
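The test feeds the spec through the full Hadoop config, but the same value is also readable straight off the spec. A minimal sketch, assuming the getter is named getTargetRowsPerSegment() as in the Druid class:

import org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec;

class TargetRowsSketch {
  public static void main(String[] args) {
    SingleDimensionPartitionsSpec spec = new SingleDimensionPartitionsSpec(123, null, null, false);
    // The first constructor argument comes back from the spec's own getter.
    System.out.println(spec.getTargetRowsPerSegment()); // 123
  }
}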
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class HadoopDruidIndexerConfigTest, method testGetTargetPartitionSizeWithSingleDimensionPartitionsMaxRowsPerSegment.
@Test
public void testGetTargetPartitionSizeWithSingleDimensionPartitionsMaxRowsPerSegment() {
  int maxRowsPerSegment = 456;
  SingleDimensionPartitionsSpec partitionsSpec =
      new SingleDimensionPartitionsSpec(null, maxRowsPerSegment, null, false);
  HadoopIngestionSpec spec = new HadoopIngestionSpecBuilder().partitionsSpec(partitionsSpec).build();
  HadoopDruidIndexerConfig config = new HadoopDruidIndexerConfig(spec);

  int targetPartitionSize = config.getTargetPartitionSize();

  Assert.assertEquals(maxRowsPerSegment, targetPartitionSize);
}
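Taken together, the two Hadoop tests pin down a simple resolution rule: whichever of targetRowsPerSegment or maxRowsPerSegment is non-null becomes the config's target partition size. A standalone sketch of that rule (an illustration of the observed behavior, not Druid's actual implementation):

class TargetPartitionSizeRuleSketch {
  // Exactly one of the two limits is non-null in a valid SingleDimensionPartitionsSpec,
  // and that one is what HadoopDruidIndexerConfig reports as the target partition size.
  static int resolveTargetPartitionSize(Integer targetRowsPerSegment, Integer maxRowsPerSegment) {
    return targetRowsPerSegment != null ? targetRowsPerSegment : maxRowsPerSegment;
  }

  public static void main(String[] args) {
    System.out.println(resolveTargetPartitionSize(123, null)); // 123
    System.out.println(resolveTargetPartitionSize(null, 456)); // 456
  }
}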