Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io: class ParallelIndexTuningConfigTest, method testSerdeWithMaxNumSubTasksAndMaxNumConcurrentSubTasks.
@Test
public void testSerdeWithMaxNumSubTasksAndMaxNumConcurrentSubTasks() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("Can't use both maxNumSubTasks and maxNumConcurrentSubTasks");
final int maxNumSubTasks = 250;
final ParallelIndexTuningConfig tuningConfig = new ParallelIndexTuningConfig(
    null, null, null, 10, 1000L, null, null, null, null,
    new DynamicPartitionsSpec(100, 100L),
    new IndexSpec(new RoaringBitmapSerdeFactory(true), CompressionStrategy.UNCOMPRESSED, CompressionStrategy.LZF, LongEncodingStrategy.LONGS),
    new IndexSpec(),
    1, false, true, 10000L,
    OffHeapMemorySegmentWriteOutMediumFactory.instance(),
    maxNumSubTasks,  // deprecated maxNumSubTasks
    maxNumSubTasks,  // maxNumConcurrentSubTasks; setting both triggers the expected failure
    100, 20L, new Duration(3600), 128,
    null, null, false, null, null, null, null, null);
}
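This test pins down that maxNumSubTasks (the deprecated name) and maxNumConcurrentSubTasks (its replacement) are mutually exclusive. Below is a minimal sketch of the kind of guard that produces this failure, using Guava's Preconditions; it is an illustration, not the actual ParallelIndexTuningConfig constructor code:
import com.google.common.base.Preconditions;
// Illustrative only: at most one of the two settings may be non-null.
static void checkSubTaskSettings(Integer maxNumSubTasks, Integer maxNumConcurrentSubTasks) {
  Preconditions.checkArgument(
      maxNumSubTasks == null || maxNumConcurrentSubTasks == null,
      "Can't use both maxNumSubTasks and maxNumConcurrentSubTasks. Use maxNumConcurrentSubTasks instead."
  );
}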
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io: class ParallelIndexTuningConfigTest, method testConstructorWithDynamicPartitionsSpecAndForceGuaranteedRollupFailToCreate.
@Test
public void testConstructorWithDynamicPartitionsSpecAndForceGuaranteedRollupFailToCreate() {
expectedException.expect(IllegalArgumentException.class);
expectedException.expectMessage("cannot be used for perfect rollup");
final boolean forceGuaranteedRollup = true;
new ParallelIndexTuningConfig(
    null, null, null, 10, 1000L, null, null, null, null,
    new DynamicPartitionsSpec(100, null),
    new IndexSpec(new RoaringBitmapSerdeFactory(true), CompressionStrategy.UNCOMPRESSED, CompressionStrategy.LZF, LongEncodingStrategy.LONGS),
    new IndexSpec(),
    1, forceGuaranteedRollup, true, 10000L,
    OffHeapMemorySegmentWriteOutMediumFactory.instance(),
    null, 10, 100, 20L, new Duration(3600), 128,
    null, null, false, null, null, null, null, null);
}
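Dynamic partitioning is best-effort, so it cannot satisfy forceGuaranteedRollup, which requires a deterministic ("perfect rollup") scheme such as hashed or single-dimension range partitioning. A minimal sketch of such a compatibility check follows; it is illustrative rather than Druid's actual validation code:
import com.google.common.base.Preconditions;
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
import org.apache.druid.indexer.partitions.PartitionsSpec;
// Illustrative only: reject best-effort partitioning when perfect rollup is forced.
static void checkRollupSettings(boolean forceGuaranteedRollup, PartitionsSpec partitionsSpec) {
  Preconditions.checkArgument(
      !forceGuaranteedRollup || !(partitionsSpec instanceof DynamicPartitionsSpec),
      partitionsSpec.getClass().getSimpleName() + " cannot be used for perfect rollup"
  );
}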
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io: class ParallelIndexTuningConfigTest, method testSerdeWithMaxRowsPerSegment.
@Test
public void testSerdeWithMaxRowsPerSegment() throws IOException {
final ParallelIndexTuningConfig tuningConfig = new ParallelIndexTuningConfig(
    null, null, null, 10, 1000L, null, null, null, null,
    new DynamicPartitionsSpec(100, 100L),  // maxRowsPerSegment = 100, maxTotalRows = 100
    new IndexSpec(new RoaringBitmapSerdeFactory(true), CompressionStrategy.UNCOMPRESSED, CompressionStrategy.LZF, LongEncodingStrategy.LONGS),
    new IndexSpec(),
    1, false, true, 10000L,
    OffHeapMemorySegmentWriteOutMediumFactory.instance(),
    null, 250, 100, 20L, new Duration(3600), 128,
    null, null, false, null, null, null, null, null);
final byte[] json = mapper.writeValueAsBytes(tuningConfig);
final ParallelIndexTuningConfig fromJson = (ParallelIndexTuningConfig) mapper.readValue(json, TuningConfig.class);
Assert.assertEquals(fromJson, tuningConfig);
}
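In the tuning config above, new DynamicPartitionsSpec(100, 100L) sets maxRowsPerSegment = 100 and maxTotalRows = 100. A small standalone sketch of the spec on its own (the getter names are assumed from the Druid API and may differ by version):
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
// Either constructor argument may be null to fall back to the spec's defaults.
DynamicPartitionsSpec spec = new DynamicPartitionsSpec(100, 100L);
// Assumed getters:
System.out.println(spec.getMaxRowsPerSegment());  // 100
System.out.println(spec.getMaxTotalRows());       // 100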
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io: class ParallelIndexTuningConfigTest, method testSerdeWithMaxNumConcurrentSubTasks.
@Test
public void testSerdeWithMaxNumConcurrentSubTasks() throws IOException {
final int maxNumConcurrentSubTasks = 250;
final ParallelIndexTuningConfig tuningConfig = new ParallelIndexTuningConfig(
    null, null, null, 10, 1000L, null, null, null, null,
    new DynamicPartitionsSpec(100, 100L),
    new IndexSpec(new RoaringBitmapSerdeFactory(true), CompressionStrategy.UNCOMPRESSED, CompressionStrategy.LZF, LongEncodingStrategy.LONGS),
    new IndexSpec(),
    1, false, true, 10000L,
    OffHeapMemorySegmentWriteOutMediumFactory.instance(),
    null, maxNumConcurrentSubTasks, 100, 20L, new Duration(3600), 128,
    null, null, false, null, null, null, null, null);
final byte[] json = mapper.writeValueAsBytes(tuningConfig);
final ParallelIndexTuningConfig fromJson = (ParallelIndexTuningConfig) mapper.readValue(json, TuningConfig.class);
Assert.assertEquals(fromJson, tuningConfig);
}
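The same round-trip pattern can be applied to the partitions spec by itself. The following is a sketch that assumes the Jackson subtype name "dynamic" is registered on PartitionsSpec and that a plain ObjectMapper suffices for this type (no injectable values are involved); the field names shown in the comment are assumptions:
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.indexer.partitions.DynamicPartitionsSpec;
import org.apache.druid.indexer.partitions.PartitionsSpec;
public class DynamicPartitionsSpecSerdeSketch {
  public static void main(String[] args) throws Exception {
    ObjectMapper mapper = new ObjectMapper();
    DynamicPartitionsSpec spec = new DynamicPartitionsSpec(100, 100L);
    // Expected to look roughly like {"type":"dynamic","maxRowsPerSegment":100,"maxTotalRows":100}
    String json = mapper.writeValueAsString(spec);
    PartitionsSpec roundTripped = mapper.readValue(json, PartitionsSpec.class);
    // Relies on DynamicPartitionsSpec implementing equals().
    System.out.println(spec.equals(roundTripped));  // true if the assumptions above hold
  }
}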
Use of org.apache.druid.indexer.partitions.DynamicPartitionsSpec in project druid by druid-io: class PartialCompactionTest, method testPartialCompactHashAndDynamicPartitionedSegments.
@Test
public void testPartialCompactHashAndDynamicPartitionedSegments() {
final Map<Interval, List<DataSegment>> hashPartitionedSegments = SegmentUtils.groupSegmentsByInterval(runTestTask(new HashedPartitionsSpec(null, 3, null), TaskState.SUCCESS, false));
final Map<Interval, List<DataSegment>> linearlyPartitionedSegments = SegmentUtils.groupSegmentsByInterval(runTestTask(new DynamicPartitionsSpec(10, null), TaskState.SUCCESS, true));
// Pick half of each interval's partition list to compact together
hashPartitionedSegments.values().forEach(segmentsInInterval -> segmentsInInterval.sort(Comparator.comparing(segment -> segment.getShardSpec().getPartitionNum())));
linearlyPartitionedSegments.values().forEach(segmentsInInterval -> segmentsInInterval.sort(Comparator.comparing(segment -> segment.getShardSpec().getPartitionNum())));
final List<DataSegment> segmentsToCompact = new ArrayList<>();
for (List<DataSegment> segmentsInInterval : hashPartitionedSegments.values()) {
segmentsToCompact.addAll(segmentsInInterval.subList(segmentsInInterval.size() / 2, segmentsInInterval.size()));
}
for (List<DataSegment> segmentsInInterval : linearlyPartitionedSegments.values()) {
segmentsToCompact.addAll(segmentsInInterval.subList(0, segmentsInInterval.size() / 2));
}
final CompactionTask compactionTask = newCompactionTaskBuilder()
    .inputSpec(SpecificSegmentsSpec.fromSegments(segmentsToCompact))
    .tuningConfig(newTuningConfig(new DynamicPartitionsSpec(20, null), 2, false))
    .build();
final Map<Interval, List<DataSegment>> compactedSegments = SegmentUtils.groupSegmentsByInterval(runTask(compactionTask, TaskState.SUCCESS));
for (List<DataSegment> segmentsInInterval : compactedSegments.values()) {
final int expectedAtomicUpdateGroupSize = segmentsInInterval.size();
for (DataSegment segment : segmentsInInterval) {
Assert.assertEquals(expectedAtomicUpdateGroupSize, segment.getShardSpec().getAtomicUpdateGroupSize());
}
}
}
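The final loop asserts that all compacted segments within an interval share one atomic update group, i.e. segment.getShardSpec().getAtomicUpdateGroupSize() equals the number of segments in that interval. For reference, SegmentUtils.groupSegmentsByInterval supplies the Map<Interval, List<DataSegment>> used throughout the test; a minimal equivalent sketch (not Druid's implementation) using streams:
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.apache.druid.timeline.DataSegment;
import org.joda.time.Interval;
// Illustrative stand-in for SegmentUtils.groupSegmentsByInterval: group segments by their time interval.
static Map<Interval, List<DataSegment>> groupByInterval(List<DataSegment> segments) {
  return segments.stream().collect(Collectors.groupingBy(DataSegment::getInterval));
}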