Use of org.apache.druid.timeline.partition.DimensionRangeShardSpec in project druid by druid-io.
In the class RangePartitionMultiPhaseParallelIndexingTest, the method assertValuesInRange:
private static void assertValuesInRange(List<StringTuple> values, DataSegment segment) {
  DimensionRangeShardSpec shardSpec = (DimensionRangeShardSpec) segment.getShardSpec();
  StringTuple start = shardSpec.getStartTuple();
  StringTuple end = shardSpec.getEndTuple();
  Assert.assertTrue(shardSpec.toString(), start != null || end != null);

  for (StringTuple value : values) {
    if (start != null) {
      // the start of the range is inclusive
      Assert.assertThat(value.compareTo(start), Matchers.greaterThanOrEqualTo(0));
    }
    if (end != null) {
      if (value == null) {
        Assert.assertNull("null values should be in first partition", start);
      } else {
        // the end of the range is exclusive
        Assert.assertThat(value.compareTo(end), Matchers.lessThan(0));
      }
    }
  }
}
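For context, a minimal sketch of how this helper might be invoked, reusing only the constructors shown elsewhere on this page; the dimension values, interval, and tuple bounds below are illustrative assumptions, not taken from the test.

DimensionRangeShardSpec shardSpec = new DimensionRangeShardSpec(
    Collections.singletonList("dim"),
    StringTuple.create("b"),   // range start, inclusive
    StringTuple.create("m"),   // range end, exclusive
    0,                         // partition number
    1                          // numCorePartitions
);
DataSegment segment = new DataSegment(
    "datasource",
    Intervals.of("2020-01-01/P1D"),
    "version",
    ImmutableMap.of(),
    ImmutableList.of("dim"),
    ImmutableList.of("met"),
    shardSpec,
    9,
    10L
);
// both values fall inside ["b", "m"), so the assertions above should pass
assertValuesInRange(ImmutableList.of(StringTuple.create("b"), StringTuple.create("c")), segment);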
Use of org.apache.druid.timeline.partition.DimensionRangeShardSpec in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, the method testAddNumberedShardSpecAfterMultiDimensionsShardSpecWithUnknownCorePartitionSize:
@Test
public void testAddNumberedShardSpecAfterMultiDimensionsShardSpecWithUnknownCorePartitionSize() throws IOException {
  final String datasource = "datasource";
  final Interval interval = Intervals.of("2020-01-01/P1D");
  final String version = "version";
  final List<String> dimensions = ImmutableList.of("dim");
  final List<String> metrics = ImmutableList.of("met");
  final Set<DataSegment> originalSegments = new HashSet<>();
  for (int i = 0; i < 6; i++) {
    originalSegments.add(new DataSegment(
        datasource, interval, version, ImmutableMap.of(), dimensions, metrics,
        new DimensionRangeShardSpec(
            Collections.singletonList("dim"),
            i == 0 ? null : StringTuple.create(String.valueOf(i - 1)),
            i == 5 ? null : StringTuple.create(String.valueOf(i)),
            i,
            // emulate shardSpecs created in older versions of Druid
            null
        ),
        9, 10L
    ));
  }

  coordinator.announceHistoricalSegments(originalSegments);

  final SegmentIdWithShardSpec id = coordinator.allocatePendingSegment(
      datasource, "seq", null, interval, NumberedPartialShardSpec.instance(), version, false
  );
  Assert.assertNull(id);
}
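The null passed as the last DimensionRangeShardSpec argument above emulates shard specs written by older versions of Druid that did not record a core partition count; because that count is unknown, the coordinator refuses to append a numbered segment and allocatePendingSegment returns null. A minimal sketch of the two constructor shapes, purely for illustration (the count of 6 is an assumption):

// shard spec as persisted by an older Druid version: core partition count unknown
DimensionRangeShardSpec legacySpec = new DimensionRangeShardSpec(
    Collections.singletonList("dim"),
    null,                      // open start of the range
    StringTuple.create("0"),
    0,                         // partition number
    null                       // numCorePartitions unknown
);
// the same range with the core partition count recorded
DimensionRangeShardSpec completeSpec = new DimensionRangeShardSpec(
    Collections.singletonList("dim"),
    null,
    StringTuple.create("0"),
    0,
    6
);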
Use of org.apache.druid.timeline.partition.DimensionRangeShardSpec in project druid by druid-io.
In the class SegmentPublisherHelperTest, the method testAnnotateCorePartitionSetSizeForDimensionRangeShardSpec:
@Test
public void testAnnotateCorePartitionSetSizeForDimensionRangeShardSpec() {
  final Set<DataSegment> segments = ImmutableSet.of(
      newSegment(new BuildingDimensionRangeShardSpec(0, Arrays.asList("dim1", "dim2"), null, StringTuple.create("a", "5"), 0)),
      newSegment(new BuildingDimensionRangeShardSpec(1, Arrays.asList("dim1", "dim2"), null, StringTuple.create("a", "5"), 1)),
      newSegment(new BuildingDimensionRangeShardSpec(2, Arrays.asList("dim1", "dim2"), null, StringTuple.create("a", "5"), 2))
  );
  final Set<DataSegment> annotated = SegmentPublisherHelper.annotateShardSpec(segments);
  for (DataSegment segment : annotated) {
    Assert.assertSame(DimensionRangeShardSpec.class, segment.getShardSpec().getClass());
    final DimensionRangeShardSpec shardSpec = (DimensionRangeShardSpec) segment.getShardSpec();
    Assert.assertEquals(3, shardSpec.getNumCorePartitions());
  }
}
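Assuming annotateShardSpec only fills in the core partition count (the test asserts the resulting class and count, nothing more), the first annotated segment's shard spec would be equivalent to the following sketch built with the constructor shown earlier:

DimensionRangeShardSpec expected = new DimensionRangeShardSpec(
    Arrays.asList("dim1", "dim2"),
    null,                           // open start of the range
    StringTuple.create("a", "5"),
    0,                              // partition number
    3                               // numCorePartitions == number of segments in the set
);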