Use of org.apache.druid.timeline.partition.SingleDimensionShardSpec in project druid by druid-io.
From the class RangePartitionAdjustingCorePartitionSizeTest, the method testLessPartitionsThanBuckets.
@Test
public void testLessPartitionsThanBuckets() throws IOException {
  final File inputDir = temporaryFolder.newFolder();
  for (int i = 0; i < 2; i++) {
    try (final Writer writer =
             Files.newBufferedWriter(new File(inputDir, "test_" + i).toPath(), StandardCharsets.UTF_8)) {
      writer.write(StringUtils.format("2020-01-01T00:00:00,aaa,b1,10\n"));
    }
  }
  for (int i = 0; i < 3; i++) {
    try (final Writer writer =
             Files.newBufferedWriter(new File(inputDir, "test_" + (i + 2)).toPath(), StandardCharsets.UTF_8)) {
      writer.write(StringUtils.format("2020-01-01T00:00:00,zzz,b1,10\n"));
    }
  }
  final List<String> partitionDimensions = Collections.singletonList("dim1");
  final DimensionBasedPartitionsSpec partitionsSpec =
      new DimensionRangePartitionsSpec(2, null, partitionDimensions, false);
  final List<DataSegment> segments = new ArrayList<>(
      runTestTask(
          TIMESTAMP_SPEC,
          DIMENSIONS_SPEC,
          INPUT_FORMAT,
          null,
          INTERVAL_TO_INDEX,
          inputDir,
          "test_*",
          partitionsSpec,
          maxNumConcurrentSubTasks,
          TaskState.SUCCESS
      )
  );
  Assert.assertEquals(1, segments.size());
  final DataSegment segment = segments.get(0);
  Assert.assertSame(SingleDimensionShardSpec.class, segment.getShardSpec().getClass());
  final SingleDimensionShardSpec shardSpec = (SingleDimensionShardSpec) segment.getShardSpec();
  Assert.assertEquals(1, shardSpec.getNumCorePartitions());
  Assert.assertEquals(0, shardSpec.getPartitionNum());
  Assert.assertEquals(partitionDimensions, shardSpec.getDimensions());
}
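The test above ends with a single published segment whose shard spec reports exactly one core partition covering the whole dim1 range. The sketch below restates the asserted invariants against a directly constructed SingleDimensionShardSpec; it is a minimal illustration, assuming the five-argument constructor (dimension, start, end, partitionNum, numCorePartitions) used later on this page and the Druid core artifacts on the classpath. Run with java -ea to enable the assert statements.

import java.util.Collections;
import org.apache.druid.timeline.partition.SingleDimensionShardSpec;

public class SingleBucketShardSpecSketch {
  public static void main(String[] args) {
    // One core partition spanning the entire "dim1" range: null start and
    // null end mark the chunk as unbounded on both sides.
    final SingleDimensionShardSpec shardSpec = new SingleDimensionShardSpec("dim1", null, null, 0, 1);
    // The same invariants the test asserts on the published segment.
    assert shardSpec.getNumCorePartitions() == 1;
    assert shardSpec.getPartitionNum() == 0;
    assert shardSpec.getDimensions().equals(Collections.singletonList("dim1"));
  }
}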
Use of org.apache.druid.timeline.partition.SingleDimensionShardSpec in project druid by druid-io.
From the class RangePartitionMultiPhaseParallelIndexingTest, the method testAppendLinearlyPartitionedSegmentsToHashPartitionedDatasourceSuccessfullyAppend.
@Test
public void testAppendLinearlyPartitionedSegmentsToHashPartitionedDatasourceSuccessfullyAppend() {
  if (useMultivalueDim) {
    return;
  }
  final int targetRowsPerSegment = NUM_ROW / DIM_FILE_CARDINALITY / NUM_PARTITION;
  final Set<DataSegment> publishedSegments = new HashSet<>();
  publishedSegments.addAll(
      runTestTask(new SingleDimensionPartitionsSpec(targetRowsPerSegment, null, DIM1, false), TaskState.SUCCESS, false)
  );
  // Append
  publishedSegments.addAll(runTestTask(new DynamicPartitionsSpec(5, null), TaskState.SUCCESS, true));
  // And append again
  publishedSegments.addAll(runTestTask(new DynamicPartitionsSpec(10, null), TaskState.SUCCESS, true));
  final Map<Interval, List<DataSegment>> intervalToSegments = new HashMap<>();
  publishedSegments.forEach(
      segment -> intervalToSegments.computeIfAbsent(segment.getInterval(), k -> new ArrayList<>()).add(segment)
  );
  for (Entry<Interval, List<DataSegment>> entry : intervalToSegments.entrySet()) {
    final List<DataSegment> segments = entry.getValue();
    final List<DataSegment> rangedSegments = segments
        .stream()
        .filter(segment -> segment.getShardSpec().getClass() == SingleDimensionShardSpec.class)
        .collect(Collectors.toList());
    final List<DataSegment> linearSegments = segments
        .stream()
        .filter(segment -> segment.getShardSpec().getClass() == NumberedShardSpec.class)
        .collect(Collectors.toList());
    for (DataSegment rangedSegment : rangedSegments) {
      final SingleDimensionShardSpec rangeShardSpec = (SingleDimensionShardSpec) rangedSegment.getShardSpec();
      for (DataSegment linearSegment : linearSegments) {
        Assert.assertEquals(rangedSegment.getInterval(), linearSegment.getInterval());
        Assert.assertEquals(rangedSegment.getVersion(), linearSegment.getVersion());
        final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) linearSegment.getShardSpec();
        Assert.assertEquals(rangeShardSpec.getNumCorePartitions(), numberedShardSpec.getNumCorePartitions());
        Assert.assertTrue(rangeShardSpec.getPartitionNum() < numberedShardSpec.getPartitionNum());
      }
    }
  }
}
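The nested loops at the end encode the contract between the two partitioning styles: the initial range-partitioned run claims the low partition numbers as the core set, and every segment appended later with dynamic partitioning reports the same numCorePartitions but a partition number past that core set. Below is a minimal sketch of that layout, assuming the NumberedShardSpec(partitionNum, numCorePartitions) constructor; the split value "m" is an illustrative choice, not taken from the test. Run with java -ea.

import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.apache.druid.timeline.partition.SingleDimensionShardSpec;

public class AppendLayoutSketch {
  public static void main(String[] args) {
    // Two core range partitions split at "m", numCorePartitions fixed at 2.
    final SingleDimensionShardSpec core0 = new SingleDimensionShardSpec("dim1", null, "m", 0, 2);
    final SingleDimensionShardSpec core1 = new SingleDimensionShardSpec("dim1", "m", null, 1, 2);
    // An appended segment: same core partition count, partition number
    // allocated past the core set.
    final NumberedShardSpec appended = new NumberedShardSpec(2, 2);

    assert core0.getNumCorePartitions() == appended.getNumCorePartitions();
    assert core0.getPartitionNum() < appended.getPartitionNum();
    assert core1.getPartitionNum() < appended.getPartitionNum();
  }
}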
Use of org.apache.druid.timeline.partition.SingleDimensionShardSpec in project druid by druid-io.
From the class RangePartitionAdjustingCorePartitionSizeTest, the method testEqualNumberOfPartitionsToBuckets.
@Test
public void testEqualNumberOfPartitionsToBuckets() throws IOException {
  final File inputDir = temporaryFolder.newFolder();
  for (int i = 0; i < 10; i++) {
    try (final Writer writer =
             Files.newBufferedWriter(new File(inputDir, "test_" + i).toPath(), StandardCharsets.UTF_8)) {
      writer.write(StringUtils.format("2020-01-01T00:00:00,%s,b1,%d\n", "aa" + (i + 10), 10 * (i + 1)));
    }
  }
  final List<String> partitionDimensions = Collections.singletonList("dim1");
  final DimensionBasedPartitionsSpec partitionsSpec =
      new DimensionRangePartitionsSpec(2, null, partitionDimensions, false);
  final Set<DataSegment> segments = runTestTask(
      TIMESTAMP_SPEC,
      DIMENSIONS_SPEC,
      INPUT_FORMAT,
      null,
      INTERVAL_TO_INDEX,
      inputDir,
      "test_*",
      partitionsSpec,
      maxNumConcurrentSubTasks,
      TaskState.SUCCESS
  );
  Assert.assertEquals(5, segments.size());
  segments.forEach(segment -> {
    Assert.assertSame(SingleDimensionShardSpec.class, segment.getShardSpec().getClass());
    final SingleDimensionShardSpec shardSpec = (SingleDimensionShardSpec) segment.getShardSpec();
    Assert.assertEquals(5, shardSpec.getNumCorePartitions());
    Assert.assertTrue(shardSpec.getPartitionNum() < shardSpec.getNumCorePartitions());
    Assert.assertEquals(partitionDimensions, shardSpec.getDimensions());
  });
}
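The expected counts follow directly from the inputs: ten files contribute one row each, every row carries a distinct dim1 value, and the partitions spec targets two rows per segment, so the task builds five range buckets and keeps all five as core partitions. A back-of-the-envelope check of that arithmetic, in plain Java with no Druid dependency:

public class BucketCountSketch {
  public static void main(String[] args) {
    final int numRows = 10;              // one row per generated input file
    final int targetRowsPerSegment = 2;  // first argument to DimensionRangePartitionsSpec
    final int expectedSegments = numRows / targetRowsPerSegment;
    System.out.println(expectedSegments); // 5, matching Assert.assertEquals(5, segments.size())
  }
}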
Use of org.apache.druid.timeline.partition.SingleDimensionShardSpec in project druid by druid-io.
From the class CachingClusteredClientTest, the method makeMockSingleDimensionSelector.
private ServerSelector makeMockSingleDimensionSelector(DruidServer server, String dimension, String start, String end, int partitionNum) {
  final DataSegment segment = new DataSegment(
      SegmentId.dummy(DATA_SOURCE), null, null, null,
      new SingleDimensionShardSpec(dimension, start, end, partitionNum, SingleDimensionShardSpec.UNKNOWN_NUM_CORE_PARTITIONS),
      null, 9, 0L
  );
  ServerSelector selector =
      new ServerSelector(segment, new HighestPriorityTierSelectorStrategy(new RandomServerSelectorStrategy()));
  selector.addServerAndUpdateSegment(new QueryableDruidServer(server, null), segment);
  return selector;
}