Use of org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec in project druid by druid-io.
The class PartialRangeSegmentGenerateTask, method getPartitionDimensions:
private static List<String> getPartitionDimensions(ParallelIndexIngestionSpec ingestionSpec)
{
  PartitionsSpec partitionsSpec = ingestionSpec.getTuningConfig().getPartitionsSpec();
  Preconditions.checkArgument(
      partitionsSpec instanceof DimensionRangePartitionsSpec,
      "%s or %s partitionsSpec required",
      DimensionRangePartitionsSpec.NAME,
      SingleDimensionPartitionsSpec.NAME
  );
  DimensionRangePartitionsSpec multiDimPartitionsSpec = (DimensionRangePartitionsSpec) partitionsSpec;
  List<String> partitionDimensions = multiDimPartitionsSpec.getPartitionDimensions();
  Preconditions.checkNotNull(partitionDimensions, "partitionDimension required");
  return partitionDimensions;
}
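For context, a minimal sketch (not Druid's own code) of building a spec that satisfies the check above; the four-argument constructor mirrors the call sites in the tests below, and the dimension names here are illustrative:

// Sketch only: the argument order is (targetRowsPerSegment, maxRowsPerSegment,
// partitionDimensions, assumeGrouped), matching the test call sites below.
PartitionsSpec spec = new DimensionRangePartitionsSpec(
    5_000_000,                         // targetRowsPerSegment
    null,                              // maxRowsPerSegment
    Arrays.asList("country", "city"),  // illustrative dimension names
    false                              // assumeGrouped
);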
Use of org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec in project druid by druid-io.
The class RangePartitionMultiPhaseParallelIndexingTest, method createsCorrectRangePartitions:
@Test
public void createsCorrectRangePartitions() throws Exception
{
  int targetRowsPerSegment = NUM_ROW * 2 / DIM_FILE_CARDINALITY / NUM_PARTITION;
  final Set<DataSegment> publishedSegments = runTestTask(
      new DimensionRangePartitionsSpec(targetRowsPerSegment, null, Collections.singletonList(DIM1), false),
      // Range partitioning rejects multi-value dimensions, so the task is
      // expected to fail when the test is parameterized with one.
      useMultivalueDim ? TaskState.FAILED : TaskState.SUCCESS,
      false
  );
  if (!useMultivalueDim) {
    assertRangePartitions(publishedSegments);
  }
}
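One detail worth noting about the constructor used here (an assumption drawn from the null second argument at every call site on this page): the spec takes either a target rows-per-segment or a hard maximum, not both.

// Sketch only, values illustrative: supply exactly one of the two row limits.
PartitionsSpec byTarget =
    new DimensionRangePartitionsSpec(1000, null, Collections.singletonList("dim1"), false);
PartitionsSpec byMax =
    new DimensionRangePartitionsSpec(null, 1000, Collections.singletonList("dim1"), false);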
Use of org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec in project druid by druid-io.
The class RangePartitionAdjustingCorePartitionSizeTest, method testEqualNumberOfPartitionsToBuckets:
@Test
public void testEqualNumberOfPartitionsToBuckets() throws IOException
{
  final File inputDir = temporaryFolder.newFolder();
  // Write 10 single-row CSV files with distinct dim1 values ("aa10" .. "aa19").
  for (int i = 0; i < 10; i++) {
    try (final Writer writer = Files.newBufferedWriter(
        new File(inputDir, "test_" + i).toPath(), StandardCharsets.UTF_8)) {
      writer.write(StringUtils.format("2020-01-01T00:00:00,%s,b1,%d\n", "aa" + (i + 10), 10 * (i + 1)));
    }
  }
  final List<String> partitionDimensions = Collections.singletonList("dim1");
  final DimensionBasedPartitionsSpec partitionsSpec =
      new DimensionRangePartitionsSpec(2, null, partitionDimensions, false);
  final Set<DataSegment> segments = runTestTask(
      TIMESTAMP_SPEC, DIMENSIONS_SPEC, INPUT_FORMAT, null, INTERVAL_TO_INDEX,
      inputDir, "test_*", partitionsSpec, maxNumConcurrentSubTasks, TaskState.SUCCESS);
  Assert.assertEquals(5, segments.size());
  segments.forEach(segment -> {
    Assert.assertSame(SingleDimensionShardSpec.class, segment.getShardSpec().getClass());
    final SingleDimensionShardSpec shardSpec = (SingleDimensionShardSpec) segment.getShardSpec();
    Assert.assertEquals(5, shardSpec.getNumCorePartitions());
    Assert.assertTrue(shardSpec.getPartitionNum() < shardSpec.getNumCorePartitions());
    Assert.assertEquals(partitionDimensions, shardSpec.getDimensions());
  });
}
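The expected counts follow from the input data: 10 rows with targetRowsPerSegment = 2 give 5 range buckets, and every bucket here receives exactly one segment, which is why numCorePartitions equals the segment count. As a quick sanity check:

// Back-of-the-envelope for the assertions above (not Druid code).
int numRows = 10;              // one row per generated input file
int targetRowsPerSegment = 2;  // from the DimensionRangePartitionsSpec
int expectedSegments = numRows / targetRowsPerSegment;  // == 5
// Each shard spec then reports numCorePartitions == 5 and partitionNum in [0, 5).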
Use of org.apache.druid.indexer.partitions.DimensionRangePartitionsSpec in project druid by druid-io.
The class CompactionTaskParallelRunTest, method testRunParallelWithMultiDimensionRangePartitioning:
@Test
public void testRunParallelWithMultiDimensionRangePartitioning() throws Exception
{
  // Range partitioning is not supported with segment lock yet
  Assume.assumeFalse(lockGranularity == LockGranularity.SEGMENT);
  runIndexTask(null, true);
  final Builder builder = new Builder(DATA_SOURCE, getSegmentCacheManagerFactory(), RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder
      .inputSpec(new CompactionIntervalSpec(INTERVAL_TO_INDEX, null))
      .tuningConfig(newTuningConfig(
          new DimensionRangePartitionsSpec(7, null, Arrays.asList("dim1", "dim2"), false), 2, true))
      .build();
  final Set<DataSegment> compactedSegments = runTask(compactionTask);
  for (DataSegment segment : compactedSegments) {
    // Expect compaction state to exist, as storing compaction state is enabled by default.
    Map<String, String> expectedLongSumMetric = new HashMap<>();
    expectedLongSumMetric.put("type", "longSum");
    expectedLongSumMetric.put("name", "val");
    expectedLongSumMetric.put("fieldName", "val");
    expectedLongSumMetric.put("expression", null);
    Assert.assertSame(DimensionRangeShardSpec.class, segment.getShardSpec().getClass());
    CompactionState expectedState = new CompactionState(
        new DimensionRangePartitionsSpec(7, null, Arrays.asList("dim1", "dim2"), false),
        new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("ts", "dim"))),
        ImmutableList.of(expectedLongSumMetric),
        null,
        compactionTask.getTuningConfig().getIndexSpec().asMap(getObjectMapper()),
        getObjectMapper().readValue(
            getObjectMapper().writeValueAsString(new UniformGranularitySpec(
                Granularities.HOUR, Granularities.MINUTE, true, ImmutableList.of(segment.getInterval()))),
            Map.class));
    Assert.assertEquals(expectedState, segment.getLastCompactionState());
  }
}
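For completeness, a minimal sketch of reading that state back off a segment; getPartitionsSpec() on CompactionState and getDimensions() on DimensionRangeShardSpec are assumed accessors, mirroring the getters exercised on SingleDimensionShardSpec earlier on this page:

// Sketch only: inspect what compaction recorded on each published segment.
for (DataSegment segment : compactedSegments) {
  CompactionState state = segment.getLastCompactionState();
  DimensionRangePartitionsSpec recorded =
      (DimensionRangePartitionsSpec) state.getPartitionsSpec();  // assumed accessor
  DimensionRangeShardSpec shard = (DimensionRangeShardSpec) segment.getShardSpec();
  // Both should name the same partition dimensions: [dim1, dim2].
  System.out.println(recorded.getPartitionDimensions() + " / " + shard.getDimensions());
}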