Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class CompactionTaskRunTest, method testRunIndexAndCompactForSameSegmentAtTheSameTime.
@Test
public void testRunIndexAndCompactForSameSegmentAtTheSameTime() throws Exception
{
  runIndexTask();
  // make sure that indexTask becomes ready first, then compactionTask becomes ready, then indexTask runs
  final CountDownLatch compactionTaskReadyLatch = new CountDownLatch(1);
  final CountDownLatch indexTaskStartLatch = new CountDownLatch(1);
  final Future<Pair<TaskStatus, List<DataSegment>>> indexFuture = exec.submit(
      () -> runIndexTask(compactionTaskReadyLatch, indexTaskStartLatch, false)
  );
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder
      .interval(Intervals.of("2014-01-01T00:00:00/2014-01-02T03:00:00"))
      .build();
  final Future<Pair<TaskStatus, List<DataSegment>>> compactionFuture = exec.submit(() -> {
    compactionTaskReadyLatch.await();
    return runTask(compactionTask, indexTaskStartLatch, null);
  });

  Assert.assertTrue(indexFuture.get().lhs.isSuccess());

  List<DataSegment> segments = indexFuture.get().rhs;
  Assert.assertEquals(6, segments.size());
  for (int i = 0; i < 6; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i / 2, i / 2 + 1),
        segments.get(i).getInterval()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + i % 2, 0, 2, (short) 1, (short) 2),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(i % 2, 2), segments.get(i).getShardSpec());
    }
  }

  final Pair<TaskStatus, List<DataSegment>> compactionResult = compactionFuture.get();
  Assert.assertEquals(TaskState.FAILED, compactionResult.lhs.getStatusCode());
}
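For reference, a minimal stand-alone sketch of the latch handshake that orders the two tasks above, using only java.util.concurrent; the task bodies and printed messages are placeholders, not the test's real helpers:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class LatchOrderingSketch
{
  public static void main(String[] args) throws Exception
  {
    final ExecutorService exec = Executors.newFixedThreadPool(2);
    final CountDownLatch compactionTaskReadyLatch = new CountDownLatch(1);
    final CountDownLatch indexTaskStartLatch = new CountDownLatch(1);

    final Future<?> indexFuture = exec.submit(() -> {
      compactionTaskReadyLatch.countDown(); // index task is ready; let the compaction task proceed
      try {
        indexTaskStartLatch.await();        // block until the compaction task is ready too
      }
      catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
      System.out.println("index task runs last");
    });

    final Future<?> compactionFuture = exec.submit(() -> {
      try {
        compactionTaskReadyLatch.await();   // only become ready after the index task signals
      }
      catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        return;
      }
      System.out.println("compaction task becomes ready");
      indexTaskStartLatch.countDown();      // unblock the index task
    });

    indexFuture.get();
    compactionFuture.get();
    exec.shutdown();
  }
}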
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class IndexTaskTest, method testOverwriteWithSameSegmentGranularity.
@Test
public void testOverwriteWithSameSegmentGranularity() throws Exception
{
  final File tmpDir = temporaryFolder.newFolder();
  final File tmpFile = File.createTempFile("druid", "index", tmpDir);
  populateRollupTestData(tmpFile);
  for (int i = 0; i < 2; i++) {
    final IndexTask indexTask = new IndexTask(
        null,
        null,
        createDefaultIngestionSpec(
            jsonMapper,
            tmpDir,
            new UniformGranularitySpec(Granularities.DAY, Granularities.DAY, true, null),
            null,
            createTuningConfig(3, 2, null, 2L, null, false, true),
            false,
            false
        ),
        null
    );
    final List<DataSegment> segments = runTask(indexTask).rhs;
    Assert.assertEquals(5, segments.size());
    final Interval expectedInterval = Intervals.of("2014-01-01T00:00:00.000Z/2014-01-02T00:00:00.000Z");
    for (int j = 0; j < 5; j++) {
      final DataSegment segment = segments.get(j);
      Assert.assertEquals(DATASOURCE, segment.getDataSource());
      Assert.assertEquals(expectedInterval, segment.getInterval());
      if (i == 0) {
        Assert.assertEquals(NumberedShardSpec.class, segment.getShardSpec().getClass());
        Assert.assertEquals(j, segment.getShardSpec().getPartitionNum());
      } else {
        if (lockGranularity == LockGranularity.SEGMENT) {
          Assert.assertEquals(NumberedOverwriteShardSpec.class, segment.getShardSpec().getClass());
          final NumberedOverwriteShardSpec numberedOverwriteShardSpec = (NumberedOverwriteShardSpec) segment.getShardSpec();
          Assert.assertEquals(j + PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, numberedOverwriteShardSpec.getPartitionNum());
          Assert.assertEquals(1, numberedOverwriteShardSpec.getMinorVersion());
          Assert.assertEquals(5, numberedOverwriteShardSpec.getAtomicUpdateGroupSize());
          Assert.assertEquals(0, numberedOverwriteShardSpec.getStartRootPartitionId());
          Assert.assertEquals(5, numberedOverwriteShardSpec.getEndRootPartitionId());
        } else {
          Assert.assertEquals(NumberedShardSpec.class, segment.getShardSpec().getClass());
          final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) segment.getShardSpec();
          Assert.assertEquals(j, numberedShardSpec.getPartitionNum());
        }
      }
    }
  }
}
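The second pass pins down the overwrite arithmetic: under segment locks, the new generation's shard specs live in a separate partition-id range starting at PartitionIds.NON_ROOT_GEN_START_PARTITION_ID and record which root partitions they replace. A hedged sketch, reusing the constructor argument order shown in the CompactionTaskRunTest snippet above and assuming druid-core is on the classpath:

import org.apache.druid.timeline.partition.NumberedOverwriteShardSpec;
import org.apache.druid.timeline.partition.PartitionIds;

public class OverwriteShardSpecSketch
{
  public static void main(String[] args)
  {
    // Second-generation segment j out of an atomic update group of 5,
    // overwriting root partitions 0..5 at minor version 1 (mirrors the assertions above).
    final int j = 3;
    final NumberedOverwriteShardSpec spec = new NumberedOverwriteShardSpec(
        PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + j, // overwrite ids start above the root range
        0,          // startRootPartitionId
        5,          // endRootPartitionId
        (short) 1,  // minorVersion
        (short) 5   // atomicUpdateGroupSize
    );
    System.out.println(spec.getPartitionNum());         // NON_ROOT_GEN_START_PARTITION_ID + 3
    System.out.println(spec.getStartRootPartitionId()); // 0
    System.out.println(spec.getEndRootPartitionId());   // 5
  }
}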
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class RangePartitionMultiPhaseParallelIndexingTest, method testAppendLinearlyPartitionedSegmentsToHashPartitionedDatasourceSuccessfullyAppend.
@Test
public void testAppendLinearlyPartitionedSegmentsToHashPartitionedDatasourceSuccessfullyAppend()
{
  if (useMultivalueDim) {
    return;
  }
  final int targetRowsPerSegment = NUM_ROW / DIM_FILE_CARDINALITY / NUM_PARTITION;
  final Set<DataSegment> publishedSegments = new HashSet<>();
  publishedSegments.addAll(
      runTestTask(new SingleDimensionPartitionsSpec(targetRowsPerSegment, null, DIM1, false), TaskState.SUCCESS, false)
  );
  // Append
  publishedSegments.addAll(runTestTask(new DynamicPartitionsSpec(5, null), TaskState.SUCCESS, true));
  // And append again
  publishedSegments.addAll(runTestTask(new DynamicPartitionsSpec(10, null), TaskState.SUCCESS, true));
  final Map<Interval, List<DataSegment>> intervalToSegments = new HashMap<>();
  publishedSegments.forEach(
      segment -> intervalToSegments.computeIfAbsent(segment.getInterval(), k -> new ArrayList<>()).add(segment)
  );
  for (Entry<Interval, List<DataSegment>> entry : intervalToSegments.entrySet()) {
    final List<DataSegment> segments = entry.getValue();
    final List<DataSegment> rangedSegments = segments
        .stream()
        .filter(segment -> segment.getShardSpec().getClass() == SingleDimensionShardSpec.class)
        .collect(Collectors.toList());
    final List<DataSegment> linearSegments = segments
        .stream()
        .filter(segment -> segment.getShardSpec().getClass() == NumberedShardSpec.class)
        .collect(Collectors.toList());
    for (DataSegment rangedSegment : rangedSegments) {
      final SingleDimensionShardSpec rangeShardSpec = (SingleDimensionShardSpec) rangedSegment.getShardSpec();
      for (DataSegment linearSegment : linearSegments) {
        Assert.assertEquals(rangedSegment.getInterval(), linearSegment.getInterval());
        Assert.assertEquals(rangedSegment.getVersion(), linearSegment.getVersion());
        final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) linearSegment.getShardSpec();
        Assert.assertEquals(rangeShardSpec.getNumCorePartitions(), numberedShardSpec.getNumCorePartitions());
        Assert.assertTrue(rangeShardSpec.getPartitionNum() < numberedShardSpec.getPartitionNum());
      }
    }
  }
}
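Both this test and the hash-partitioned variant below index the published segments by interval in a single computeIfAbsent pass. A minimal stand-alone sketch of that grouping idiom with plain strings (the data here is made up for illustration):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GroupByIntervalSketch
{
  public static void main(String[] args)
  {
    final List<String> segments = List.of("seg0@2014-01-01", "seg1@2014-01-01", "seg2@2014-01-02");
    final Map<String, List<String>> intervalToSegments = new HashMap<>();
    for (String segment : segments) {
      final String interval = segment.substring(segment.indexOf('@') + 1);
      // computeIfAbsent creates the bucket on the first sighting of a key, then appends.
      intervalToSegments.computeIfAbsent(interval, k -> new ArrayList<>()).add(segment);
    }
    System.out.println(intervalToSegments);
    // e.g. {2014-01-01=[seg0@2014-01-01, seg1@2014-01-01], 2014-01-02=[seg2@2014-01-02]}
  }
}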
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class HashPartitionMultiPhaseParallelIndexingTest, method testAppendLinearlyPartitionedSegmensToHashPartitionedDatasourceSuccessfullyAppend.
@Test
public void testAppendLinearlyPartitionedSegmensToHashPartitionedDatasourceSuccessfullyAppend()
{
  final Set<DataSegment> publishedSegments = new HashSet<>();
  publishedSegments.addAll(
      runTestTask(new HashedPartitionsSpec(null, numShards, ImmutableList.of("dim1", "dim2")), TaskState.SUCCESS, false)
  );
  // Append
  publishedSegments.addAll(runTestTask(new DynamicPartitionsSpec(5, null), TaskState.SUCCESS, true));
  // And append again
  publishedSegments.addAll(runTestTask(new DynamicPartitionsSpec(10, null), TaskState.SUCCESS, true));
  final Map<Interval, List<DataSegment>> intervalToSegments = new HashMap<>();
  publishedSegments.forEach(
      segment -> intervalToSegments.computeIfAbsent(segment.getInterval(), k -> new ArrayList<>()).add(segment)
  );
  for (Entry<Interval, List<DataSegment>> entry : intervalToSegments.entrySet()) {
    final List<DataSegment> segments = entry.getValue();
    final List<DataSegment> hashedSegments = segments
        .stream()
        .filter(segment -> segment.getShardSpec().getClass() == HashBasedNumberedShardSpec.class)
        .collect(Collectors.toList());
    final List<DataSegment> linearSegments = segments
        .stream()
        .filter(segment -> segment.getShardSpec().getClass() == NumberedShardSpec.class)
        .collect(Collectors.toList());
    for (DataSegment hashedSegment : hashedSegments) {
      final HashBasedNumberedShardSpec hashShardSpec = (HashBasedNumberedShardSpec) hashedSegment.getShardSpec();
      for (DataSegment linearSegment : linearSegments) {
        Assert.assertEquals(hashedSegment.getInterval(), linearSegment.getInterval());
        Assert.assertEquals(hashedSegment.getVersion(), linearSegment.getVersion());
        final NumberedShardSpec numberedShardSpec = (NumberedShardSpec) linearSegment.getShardSpec();
        Assert.assertEquals(hashShardSpec.getNumCorePartitions(), numberedShardSpec.getNumCorePartitions());
        Assert.assertTrue(hashShardSpec.getPartitionNum() < numberedShardSpec.getPartitionNum());
      }
    }
  }
}
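The nested loops above encode one invariant: segments appended with dynamic partitioning keep the original core-partition count but take partition numbers beyond the core range. A hedged sketch of that relation using only NumberedShardSpec (the concrete numbers are illustrative; druid-core on the classpath is assumed):

import org.apache.druid.timeline.partition.NumberedShardSpec;

public class AppendInvariantSketch
{
  public static void main(String[] args)
  {
    final int numCorePartitions = 2;
    // Stand-in for a first-generation core spec (in the tests these are
    // hash/range specs carrying the same core count).
    final NumberedShardSpec core = new NumberedShardSpec(1, numCorePartitions);
    // Appended segment: same core count, partition number past the core range.
    final NumberedShardSpec appended = new NumberedShardSpec(2, numCorePartitions);
    System.out.println(core.getNumCorePartitions() == appended.getNumCorePartitions()); // true
    System.out.println(core.getPartitionNum() < appended.getPartitionNum());            // true
  }
}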
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class IngestSegmentFirehoseFactoryTest, method buildSegment.
private static DataSegment buildSegment(Integer shardNumber)
{
  Preconditions.checkArgument(shardNumber < MAX_SHARD_NUMBER);
  Preconditions.checkArgument(shardNumber >= 0);
  return new DataSegment(
      DATA_SOURCE_NAME, Intervals.ETERNITY, DATA_SOURCE_VERSION,
      ImmutableMap.of("type", "local", "path", PERSIST_DIR.getAbsolutePath()),
      ImmutableList.of(DIM_NAME), ImmutableList.of(METRIC_LONG_NAME, METRIC_FLOAT_NAME),
      new NumberedShardSpec(shardNumber, MAX_SHARD_NUMBER), BINARY_VERSION, 0L
  );
}
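A hedged usage sketch, not taken from the test itself: the guard clauses admit shard numbers 0 through MAX_SHARD_NUMBER - 1, so a caller can build the complete shard set in a loop.

// Hypothetical caller: one segment per valid shard number.
final List<DataSegment> segments = new ArrayList<>();
for (int shardNumber = 0; shardNumber < MAX_SHARD_NUMBER; shardNumber++) {
  // Each segment carries NumberedShardSpec(shardNumber, MAX_SHARD_NUMBER).
  segments.add(buildSegment(shardNumber));
}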