Use of org.apache.druid.timeline.partition.NumberedOverwriteShardSpec in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, method testAllocatePendingSegmentsWithOvershadowingSegments:
@Test
public void testAllocatePendingSegmentsWithOvershadowingSegments() throws IOException
{
  final String dataSource = "ds";
  final Interval interval = Intervals.of("2017-01-01/2017-02-01");
  String prevSegmentId = null;
  for (int i = 0; i < 10; i++) {
    final SegmentIdWithShardSpec identifier = coordinator.allocatePendingSegment(
        dataSource, "seq", prevSegmentId, interval,
        new NumberedOverwritePartialShardSpec(0, 1, (short) (i + 1)), "version", false
    );
    Assert.assertEquals(
        StringUtils.format("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_version%s",
                           "_" + (i + PartitionIds.NON_ROOT_GEN_START_PARTITION_ID)),
        identifier.toString()
    );
    prevSegmentId = identifier.toString();
    final Set<DataSegment> toBeAnnounced = Collections.singleton(new DataSegment(
        identifier.getDataSource(), identifier.getInterval(), identifier.getVersion(), null,
        Collections.emptyList(), Collections.emptyList(),
        ((NumberedOverwriteShardSpec) identifier.getShardSpec()).withAtomicUpdateGroupSize(1),
        0, 10L
    ));
    final Set<DataSegment> announced = coordinator.announceHistoricalSegments(toBeAnnounced);
    Assert.assertEquals(toBeAnnounced, announced);
  }
  final Collection<DataSegment> visibleSegments =
      coordinator.retrieveUsedSegmentsForInterval(dataSource, interval, Segments.ONLY_VISIBLE);
  Assert.assertEquals(1, visibleSegments.size());
  Assert.assertEquals(
      new DataSegment(dataSource, interval, "version", null,
          Collections.emptyList(), Collections.emptyList(),
          new NumberedOverwriteShardSpec(
              9 + PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, 0, 1, (short) 9, (short) 1),
          0, 10L),
      Iterables.getOnlyElement(visibleSegments)
  );
}
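The five NumberedOverwriteShardSpec constructor arguments can be read off from the final assertion above. Below is a minimal sketch, assuming the argument order used throughout these tests (partition ID, start and end of the overwritten root partition ID range, minor version, atomic update group size); the class name and printed values are illustrative only.

import org.apache.druid.timeline.partition.NumberedOverwriteShardSpec;
import org.apache.druid.timeline.partition.PartitionIds;

public class NumberedOverwriteShardSpecSketch
{
  public static void main(String[] args)
  {
    // Builds the same shard spec the final assertion above expects.
    final NumberedOverwriteShardSpec spec = new NumberedOverwriteShardSpec(
        PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + 9, // partition ID of the overwriting segment
        0,         // start of the overwritten root partition ID range (inclusive)
        1,         // end of the overwritten root partition ID range (exclusive)
        (short) 9, // minor version, used to decide overshadowing among overwrites
        (short) 1  // atomic update group size
    );
    System.out.println(spec.getPartitionNum());          // 32777, given the 32768 start of the range
    System.out.println(spec.getAtomicUpdateGroupSize()); // 1
  }
}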
Use of org.apache.druid.timeline.partition.NumberedOverwriteShardSpec in project druid by druid-io.
In the class SegmentManagerTest, method testLoadAndDropNonRootGenerationSegment:
@Test
public void testLoadAndDropNonRootGenerationSegment() throws SegmentLoadingException
{
  final DataSegment segment = new DataSegment(
      "small_source", Intervals.of("0/1000"), "0",
      ImmutableMap.of("interval", Intervals.of("0/1000"), "version", 0),
      new ArrayList<>(), new ArrayList<>(),
      new NumberedOverwriteShardSpec(
          PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + 10, 10, 20, (short) 1, (short) 1),
      0, 10
  );
  segmentManager.loadSegment(segment, false, SegmentLazyLoadFailCallback.NOOP);
  assertResult(ImmutableList.of(segment));
  segmentManager.dropSegment(segment);
  assertResult(ImmutableList.of());
}
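The partition ID here, NON_ROOT_GEN_START_PARTITION_ID + 10, sits in the range Druid reserves for non-root generations (segments produced by overwrites), while root-generation IDs start at 0. A minimal sketch of that distinction; the helper is hypothetical, not a Druid API:

import org.apache.druid.timeline.partition.PartitionIds;

public class PartitionIdRangeSketch
{
  // Hypothetical helper: true if the ID belongs to the non-root-generation range.
  static boolean isNonRootGeneration(int partitionId)
  {
    return partitionId >= PartitionIds.NON_ROOT_GEN_START_PARTITION_ID;
  }

  public static void main(String[] args)
  {
    System.out.println(isNonRootGeneration(10));                                                // false
    System.out.println(isNonRootGeneration(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + 10)); // true
  }
}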
Use of org.apache.druid.timeline.partition.NumberedOverwriteShardSpec in project druid by druid-io.
In the class CompactionTaskParallelRunTest, method testCompactRangeAndDynamicPartitionedSegments:
@Test
public void testCompactRangeAndDynamicPartitionedSegments()
{
  runIndexTask(new SingleDimensionPartitionsSpec(2, null, "dim", false), false);
  runIndexTask(null, true);
  final Builder builder = new Builder(DATA_SOURCE, getSegmentCacheManagerFactory(), RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder
      .inputSpec(new CompactionIntervalSpec(INTERVAL_TO_INDEX, null))
      .tuningConfig(AbstractParallelIndexSupervisorTaskTest.DEFAULT_TUNING_CONFIG_FOR_PARALLEL_INDEXING)
      .build();
  final Map<Interval, List<DataSegment>> intervalToSegments =
      SegmentUtils.groupSegmentsByInterval(runTask(compactionTask));
  Assert.assertEquals(3, intervalToSegments.size());
  Assert.assertEquals(
      ImmutableSet.of(
          Intervals.of("2014-01-01T00/PT1H"),
          Intervals.of("2014-01-01T01/PT1H"),
          Intervals.of("2014-01-01T02/PT1H")
      ),
      intervalToSegments.keySet()
  );
  for (Entry<Interval, List<DataSegment>> entry : intervalToSegments.entrySet()) {
    final List<DataSegment> segmentsInInterval = entry.getValue();
    Assert.assertEquals(1, segmentsInInterval.size());
    final ShardSpec shardSpec = segmentsInInterval.get(0).getShardSpec();
    if (lockGranularity == LockGranularity.TIME_CHUNK) {
      Assert.assertSame(NumberedShardSpec.class, shardSpec.getClass());
      final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) shardSpec;
      Assert.assertEquals(0, numberedShardSpec.getPartitionNum());
      Assert.assertEquals(1, numberedShardSpec.getNumCorePartitions());
    } else {
      Assert.assertSame(NumberedOverwriteShardSpec.class, shardSpec.getClass());
      final NumberedOverwriteShardSpec numberedShardSpec = (NumberedOverwriteShardSpec) shardSpec;
      Assert.assertEquals(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, numberedShardSpec.getPartitionNum());
      Assert.assertEquals(1, numberedShardSpec.getAtomicUpdateGroupSize());
    }
  }
}
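The branch above encodes the expectation that time-chunk locking makes compaction write a fresh root generation (NumberedShardSpec), while segment locking makes it overwrite in place (NumberedOverwriteShardSpec). A minimal sketch of that mapping as a hypothetical helper; the LockGranularity import path is assumed from the Druid indexing module:

import org.apache.druid.indexing.common.LockGranularity;
import org.apache.druid.timeline.partition.NumberedOverwriteShardSpec;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.apache.druid.timeline.partition.ShardSpec;

public class ExpectedShardSpecSketch
{
  // Hypothetical helper mirroring the test's branch: the shard spec class
  // compaction is expected to produce under each lock granularity.
  static Class<? extends ShardSpec> expectedShardSpecClass(LockGranularity lockGranularity)
  {
    return lockGranularity == LockGranularity.TIME_CHUNK
           ? NumberedShardSpec.class
           : NumberedOverwriteShardSpec.class;
  }
}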
Use of org.apache.druid.timeline.partition.NumberedOverwriteShardSpec in project druid by druid-io.
In the class CompactionTaskParallelRunTest, method testCompactHashAndDynamicPartitionedSegments (identical to the previous test except that the first index task uses hash partitioning instead of range partitioning):
@Test
public void testCompactHashAndDynamicPartitionedSegments()
{
  runIndexTask(new HashedPartitionsSpec(null, 2, null), false);
  runIndexTask(null, true);
  final Builder builder = new Builder(DATA_SOURCE, getSegmentCacheManagerFactory(), RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder
      .inputSpec(new CompactionIntervalSpec(INTERVAL_TO_INDEX, null))
      .tuningConfig(AbstractParallelIndexSupervisorTaskTest.DEFAULT_TUNING_CONFIG_FOR_PARALLEL_INDEXING)
      .build();
  final Map<Interval, List<DataSegment>> intervalToSegments =
      SegmentUtils.groupSegmentsByInterval(runTask(compactionTask));
  Assert.assertEquals(3, intervalToSegments.size());
  Assert.assertEquals(
      ImmutableSet.of(
          Intervals.of("2014-01-01T00/PT1H"),
          Intervals.of("2014-01-01T01/PT1H"),
          Intervals.of("2014-01-01T02/PT1H")
      ),
      intervalToSegments.keySet()
  );
  for (Entry<Interval, List<DataSegment>> entry : intervalToSegments.entrySet()) {
    final List<DataSegment> segmentsInInterval = entry.getValue();
    Assert.assertEquals(1, segmentsInInterval.size());
    final ShardSpec shardSpec = segmentsInInterval.get(0).getShardSpec();
    if (lockGranularity == LockGranularity.TIME_CHUNK) {
      Assert.assertSame(NumberedShardSpec.class, shardSpec.getClass());
      final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) shardSpec;
      Assert.assertEquals(0, numberedShardSpec.getPartitionNum());
      Assert.assertEquals(1, numberedShardSpec.getNumCorePartitions());
    } else {
      Assert.assertSame(NumberedOverwriteShardSpec.class, shardSpec.getClass());
      final NumberedOverwriteShardSpec numberedShardSpec = (NumberedOverwriteShardSpec) shardSpec;
      Assert.assertEquals(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, numberedShardSpec.getPartitionNum());
      Assert.assertEquals(1, numberedShardSpec.getAtomicUpdateGroupSize());
    }
  }
}
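For the TIME_CHUNK branch shared by both compaction tests, the asserted shard spec is the root-generation NumberedShardSpec(partitionNum, numCorePartitions), the same constructor used directly in CompactionTaskRunTest below. A minimal sketch; the class name is illustrative:

import org.apache.druid.timeline.partition.NumberedShardSpec;

public class NumberedShardSpecSketch
{
  public static void main(String[] args)
  {
    // The shard spec asserted for compacted segments under time-chunk locking:
    // partition 0 of a generation with a single core partition.
    final NumberedShardSpec spec = new NumberedShardSpec(0, 1);
    System.out.println(spec.getPartitionNum());      // 0
    System.out.println(spec.getNumCorePartitions()); // 1
  }
}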
Use of org.apache.druid.timeline.partition.NumberedOverwriteShardSpec in project druid by druid-io.
In the class CompactionTaskRunTest, method testRunWithDynamicPartitioning:
@Test
public void testRunWithDynamicPartitioning() throws Exception
{
  runIndexTask();
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder.interval(Intervals.of("2014-01-01/2014-01-02")).build();
  final Pair<TaskStatus, List<DataSegment>> resultPair = runTask(compactionTask);
  Assert.assertTrue(resultPair.lhs.isSuccess());
  final List<DataSegment> segments = resultPair.rhs;
  Assert.assertEquals(3, segments.size());
  for (int i = 0; i < 3; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1),
        segments.get(i).getInterval()
    );
    Assert.assertEquals(
        getDefaultCompactionState(
            Granularities.HOUR,
            Granularities.MINUTE,
            ImmutableList.of(Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1))
        ),
        segments.get(i).getLastCompactionState()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(32768, 0, 2, (short) 1, (short) 1),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(0, 1), segments.get(i).getShardSpec());
    }
  }
  List<String> rowsFromSegment = getCSVFormatRowsFromSegments(segments);
  Assert.assertEquals(TEST_ROWS, rowsFromSegment);
}
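The literal 32768 in the SEGMENT-lock branch is the value of PartitionIds.NON_ROOT_GEN_START_PARTITION_ID that the other examples on this page reference by name. A minimal sketch making the equivalence explicit:

import org.apache.druid.timeline.partition.PartitionIds;

public class NonRootGenStartSketch
{
  public static void main(String[] args)
  {
    // 32768 is the first partition ID reserved for non-root generations,
    // matching the literal used in the SEGMENT-lock assertion above.
    System.out.println(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID); // 32768
  }
}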