Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class PartialCompactionTest, method testPartialCompactRangeAndDynamicPartitionedSegments.
@Test
public void testPartialCompactRangeAndDynamicPartitionedSegments() {
  final Map<Interval, List<DataSegment>> rangePartitionedSegments = SegmentUtils.groupSegmentsByInterval(
      runTestTask(new SingleDimensionPartitionsSpec(10, null, "dim1", false), TaskState.SUCCESS, false)
  );
  final Map<Interval, List<DataSegment>> linearlyPartitionedSegments = SegmentUtils.groupSegmentsByInterval(
      runTestTask(new DynamicPartitionsSpec(10, null), TaskState.SUCCESS, true)
  );
  // Pick half of each partition list to compact together
  rangePartitionedSegments.values().forEach(
      segmentsInInterval -> segmentsInInterval.sort(Comparator.comparing(segment -> segment.getShardSpec().getPartitionNum()))
  );
  linearlyPartitionedSegments.values().forEach(
      segmentsInInterval -> segmentsInInterval.sort(Comparator.comparing(segment -> segment.getShardSpec().getPartitionNum()))
  );
  final List<DataSegment> segmentsToCompact = new ArrayList<>();
  for (List<DataSegment> segmentsInInterval : rangePartitionedSegments.values()) {
    segmentsToCompact.addAll(segmentsInInterval.subList(segmentsInInterval.size() / 2, segmentsInInterval.size()));
  }
  for (List<DataSegment> segmentsInInterval : linearlyPartitionedSegments.values()) {
    segmentsToCompact.addAll(segmentsInInterval.subList(0, segmentsInInterval.size() / 2));
  }
  final CompactionTask compactionTask = newCompactionTaskBuilder()
      .inputSpec(SpecificSegmentsSpec.fromSegments(segmentsToCompact))
      .tuningConfig(newTuningConfig(new DynamicPartitionsSpec(20, null), 2, false))
      .build();
  final Map<Interval, List<DataSegment>> compactedSegments = SegmentUtils.groupSegmentsByInterval(
      runTask(compactionTask, TaskState.SUCCESS)
  );
  for (List<DataSegment> segmentsInInterval : compactedSegments.values()) {
    final int expectedAtomicUpdateGroupSize = segmentsInInterval.size();
    for (DataSegment segment : segmentsInInterval) {
      Assert.assertEquals(expectedAtomicUpdateGroupSize, segment.getShardSpec().getAtomicUpdateGroupSize());
    }
  }
}
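For reference, a minimal sketch of the range spec constructed above. The parameter names assume the four-argument constructor (targetRowsPerSegment, maxRowsPerSegment, partitionDimension, assumeGrouped); verify them against the Druid version in use.
// Assumed parameter meanings for the spec used in the test above.
SingleDimensionPartitionsSpec rangeSpec = new SingleDimensionPartitionsSpec(
    10,      // targetRowsPerSegment: aim for roughly 10 rows per segment
    null,    // maxRowsPerSegment: unset; normally only one of target/max is set
    "dim1",  // partitionDimension: the dimension whose value ranges define the partitions
    false    // assumeGrouped: input rows are not assumed to be pre-grouped on that dimension
);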
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class ITAppendBatchIndexTest, method submitIngestionTaskAndVerify.
private void submitIngestionTaskAndVerify(
    String indexDatasource,
    PartitionsSpec partitionsSpec,
    boolean appendToExisting,
    Pair<Boolean, Boolean> segmentAvailabilityConfirmationPair
) throws Exception {
  InputFormatDetails inputFormatDetails = InputFormatDetails.JSON;
  Map<String, Object> inputFormatMap = new ImmutableMap.Builder<String, Object>()
      .put("type", inputFormatDetails.getInputFormatType())
      .build();
  final Function<String, String> sqlInputSourcePropsTransform = spec -> {
    try {
      spec = StringUtils.replace(spec, "%%PARTITIONS_SPEC%%", jsonMapper.writeValueAsString(partitionsSpec));
      spec = StringUtils.replace(spec, "%%INPUT_SOURCE_FILTER%%", "*" + inputFormatDetails.getFileExtension());
      spec = StringUtils.replace(spec, "%%INPUT_SOURCE_BASE_DIR%%", "/resources/data/batch_index" + inputFormatDetails.getFolderSuffix());
      spec = StringUtils.replace(spec, "%%INPUT_FORMAT%%", jsonMapper.writeValueAsString(inputFormatMap));
      spec = StringUtils.replace(spec, "%%APPEND_TO_EXISTING%%", jsonMapper.writeValueAsString(appendToExisting));
      spec = StringUtils.replace(spec, "%%DROP_EXISTING%%", jsonMapper.writeValueAsString(false));
      if (partitionsSpec instanceof DynamicPartitionsSpec) {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(false));
      } else if (partitionsSpec instanceof HashedPartitionsSpec || partitionsSpec instanceof SingleDimensionPartitionsSpec) {
        spec = StringUtils.replace(spec, "%%FORCE_GUARANTEED_ROLLUP%%", jsonMapper.writeValueAsString(true));
      }
      return spec;
    } catch (Exception e) {
      throw new RuntimeException(e);
    }
  };
  doIndexTest(indexDatasource, INDEX_TASK, sqlInputSourcePropsTransform, null, false, false, true, segmentAvailabilityConfirmationPair);
}
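The instanceof branching above encodes the pairing exercised throughout these tests: hash and range (single-dimension) partitioning require guaranteed rollup, while dynamic partitioning requires best-effort rollup. A hedged sketch of that rule as a standalone helper (the helper itself is hypothetical, not part of the test class):
// Hypothetical helper mirroring the rule above: hash or range partitioning implies
// forceGuaranteedRollup = true, dynamic partitioning implies false.
private static boolean requiresForceGuaranteedRollup(PartitionsSpec partitionsSpec) {
  return partitionsSpec instanceof HashedPartitionsSpec
      || partitionsSpec instanceof SingleDimensionPartitionsSpec;
}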
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class ParallelIndexTuningConfigTest, method testConstructorWithSingleDimensionPartitionsSpecAndNonForceGuaranteedRollupFailToCreate.
@Test
public void testConstructorWithSingleDimensionPartitionsSpecAndNonForceGuaranteedRollupFailToCreate() {
  expectedException.expect(IllegalArgumentException.class);
  expectedException.expectMessage("DynamicPartitionsSpec must be used for best-effort rollup");
  final boolean forceGuaranteedRollup = false;
  new ParallelIndexTuningConfig(
      null, null, null, 10, 1000L, null, null, null, null,
      new SingleDimensionPartitionsSpec(100, null, "dim", false),
      new IndexSpec(
          new RoaringBitmapSerdeFactory(true),
          CompressionStrategy.UNCOMPRESSED,
          CompressionStrategy.LZF,
          LongEncodingStrategy.LONGS
      ),
      new IndexSpec(),
      1,
      forceGuaranteedRollup,
      true,
      10000L,
      OffHeapMemorySegmentWriteOutMediumFactory.instance(),
      null, 10, 100, 20L,
      new Duration(3600),
      128,
      null, null, false, null, null, null, null, null
  );
}
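The expected failure comes from the tuning config rejecting an inconsistent rollup/partitioning combination. A minimal sketch of the invariant being tested, written as a standalone check rather than the actual Druid validation code:
// Hypothetical re-statement of the invariant: best-effort rollup (forceGuaranteedRollup = false)
// only accepts DynamicPartitionsSpec, so the SingleDimensionPartitionsSpec above must fail.
static void validateRollup(boolean forceGuaranteedRollup, PartitionsSpec partitionsSpec) {
  if (!forceGuaranteedRollup && !(partitionsSpec instanceof DynamicPartitionsSpec)) {
    throw new IllegalArgumentException("DynamicPartitionsSpec must be used for best-effort rollup");
  }
}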
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class ITAutoCompactionTest, method testAutoCompactionDutyCanUpdateCompactionConfig.
@Test
public void testAutoCompactionDutyCanUpdateCompactionConfig() throws Exception {
  loadData(INDEX_TASK);
  try (final Closeable ignored = unloader(fullDatasourceName)) {
    final List<String> intervalsBeforeCompaction = coordinator.getSegmentIntervals(fullDatasourceName);
    intervalsBeforeCompaction.sort(null);
    // 4 segments across 2 days (4 total)...
    verifySegmentsCount(4);
    verifyQuery(INDEX_QUERIES_RESOURCE);
    // Dummy compaction config which will be overwritten
    submitCompactionConfig(10000, NO_SKIP_OFFSET);
    // The new compaction config should overwrite the existing compaction config
    submitCompactionConfig(1, NO_SKIP_OFFSET);
    LOG.info("Auto compaction test with dynamic partitioning");
    // Instead of merging segments, the updated config will split segments!
    // ...compacted into 10 new segments across 2 days. 5 new segments each day (10 total)
    forceTriggerAutoCompaction(10);
    verifyQuery(INDEX_QUERIES_RESOURCE);
    verifySegmentsCompacted(10, 1);
    checkCompactionIntervals(intervalsBeforeCompaction);
    LOG.info("Auto compaction test with hash partitioning");
    final HashedPartitionsSpec hashedPartitionsSpec = new HashedPartitionsSpec(null, 3, null);
    submitCompactionConfig(hashedPartitionsSpec, NO_SKIP_OFFSET, 1, null, null, null, null, false);
    // 2 segments published per day after compaction
    forceTriggerAutoCompaction(4);
    verifyQuery(INDEX_QUERIES_RESOURCE);
    verifySegmentsCompacted(hashedPartitionsSpec, 4);
    checkCompactionIntervals(intervalsBeforeCompaction);
    LOG.info("Auto compaction test with range partitioning");
    final SingleDimensionPartitionsSpec rangePartitionsSpec = new SingleDimensionPartitionsSpec(5, null, "city", false);
    submitCompactionConfig(rangePartitionsSpec, NO_SKIP_OFFSET, 1, null, null, null, null, false);
    forceTriggerAutoCompaction(2);
    verifyQuery(INDEX_QUERIES_RESOURCE);
    verifySegmentsCompacted(rangePartitionsSpec, 2);
    checkCompactionIntervals(intervalsBeforeCompaction);
  }
}
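For readability, a hedged reading of the hashed spec submitted above: its three-argument constructor is assumed to be (targetRowsPerSegment, numShards, partitionDimensions), so the test fixes the shard count at 3 and, with null partition dimensions, hashes on all dimensions.
// Assumed parameter meanings; verify against the Druid version in use.
HashedPartitionsSpec hashedSpec = new HashedPartitionsSpec(
    null,  // targetRowsPerSegment: unset
    3,     // numShards: exactly 3 hash-based shards per time chunk
    null   // partitionDimensions: null means hash on all dimensions
);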
Use of org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec in project druid by druid-io.
From the class IndexTaskTest, method testIndexTaskWithSingleDimPartitionsSpecThrowingException.
@Test
public void testIndexTaskWithSingleDimPartitionsSpecThrowingException() throws Exception {
  final File tmpDir = temporaryFolder.newFolder();
  final IndexTask task = new IndexTask(
      null,
      null,
      createDefaultIngestionSpec(
          jsonMapper, tmpDir, null, null,
          createTuningConfigWithPartitionsSpec(new SingleDimensionPartitionsSpec(null, 1, null, false), true),
          false, false
      ),
      null
  );
  expectedException.expect(UnsupportedOperationException.class);
  expectedException.expectMessage(
      "partitionsSpec[org.apache.druid.indexer.partitions.SingleDimensionPartitionsSpec] is not supported"
  );
  task.isReady(createActionClient(task));
}
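The sequential IndexTask rejects range partitioning here; in these tests, single-dimension range partitioning is exercised through the parallel indexing task, while the plain IndexTask is used with dynamic or hashed specs. A hedged sketch of a counterpart built with the same test helpers that should not hit this exception (the hashed spec and its arguments are assumptions, not part of the original test):
// Hypothetical counterpart: a hashed spec that the sequential IndexTask is expected to accept.
final IndexTask supportedTask = new IndexTask(
    null,
    null,
    createDefaultIngestionSpec(
        jsonMapper, tmpDir, null, null,
        createTuningConfigWithPartitionsSpec(new HashedPartitionsSpec(null, 1, null), true),
        false, false
    ),
    null
);
// With this spec, supportedTask.isReady(createActionClient(supportedTask)) should not throw
// the UnsupportedOperationException asserted above.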