Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class CompactionTaskRunTest, method testRunCompactionTwice.
@Test
public void testRunCompactionTwice() throws Exception
{
  runIndexTask();
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);

  final CompactionTask compactionTask1 = builder.interval(Intervals.of("2014-01-01/2014-01-02")).build();
  Pair<TaskStatus, List<DataSegment>> resultPair = runTask(compactionTask1);
  Assert.assertTrue(resultPair.lhs.isSuccess());
  List<DataSegment> segments = resultPair.rhs;
  Assert.assertEquals(3, segments.size());
  for (int i = 0; i < 3; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1),
        segments.get(i).getInterval()
    );
    Assert.assertEquals(
        getDefaultCompactionState(
            Granularities.HOUR,
            Granularities.MINUTE,
            ImmutableList.of(Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1))
        ),
        segments.get(i).getLastCompactionState()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, 0, 2, (short) 1, (short) 1),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(0, 1), segments.get(i).getShardSpec());
    }
  }

  // Compact the same interval a second time.
  final CompactionTask compactionTask2 = builder.interval(Intervals.of("2014-01-01/2014-01-02")).build();
  resultPair = runTask(compactionTask2);
  Assert.assertTrue(resultPair.lhs.isSuccess());
  segments = resultPair.rhs;
  Assert.assertEquals(3, segments.size());
  for (int i = 0; i < 3; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1),
        segments.get(i).getInterval()
    );
    Assert.assertEquals(
        getDefaultCompactionState(
            Granularities.HOUR,
            Granularities.MINUTE,
            ImmutableList.of(Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1))
        ),
        segments.get(i).getLastCompactionState()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      // The second pass under segment locks gets the next partition id and minor version 2.
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + 1, 0, 2, (short) 2, (short) 1),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(0, 1), segments.get(i).getShardSpec());
    }
  }
}
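The assertions distinguish the two shard-spec shapes: with time-chunk locks the compacted segments stay in the root generation as NumberedShardSpec(partitionNum, partitions), while with segment locks each compaction pass creates an overwrite generation whose partition id starts at PartitionIds.NON_ROOT_GEN_START_PARTITION_ID and whose minor version increments per pass. A minimal sketch of that progression, using only the constructors exercised in the test (the class name is illustrative):

import org.apache.druid.timeline.partition.NumberedOverwriteShardSpec;
import org.apache.druid.timeline.partition.NumberedShardSpec;
import org.apache.druid.timeline.partition.PartitionIds;

public class ShardSpecProgression
{
  public static void main(String[] args)
  {
    // Root generation: partition 0 of 1 core partitions in the time chunk.
    final NumberedShardSpec root = new NumberedShardSpec(0, 1);

    // First compaction pass under segment locks: overwrites root partitions [0, 2)
    // with minor version 1 and an atomic update group of size 1.
    final NumberedOverwriteShardSpec firstPass = new NumberedOverwriteShardSpec(
        PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, 0, 2, (short) 1, (short) 1
    );

    // Second pass: next partition id in the non-root range, minor version 2.
    final NumberedOverwriteShardSpec secondPass = new NumberedOverwriteShardSpec(
        PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + 1, 0, 2, (short) 2, (short) 1
    );

    System.out.println(root);
    System.out.println(firstPass);
    System.out.println(secondPass);
  }
}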
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class CompactionTaskRunTest, method testWithGranularitySpecNonNullQueryGranularityAndNullSegmentGranularity.
@Test
public void testWithGranularitySpecNonNullQueryGranularityAndNullSegmentGranularity() throws Exception
{
  runIndexTask();
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);

  // SECOND queryGranularity; segmentGranularity left null
  final CompactionTask compactionTask1 = builder
      .interval(Intervals.of("2014-01-01/2014-01-02"))
      .granularitySpec(new ClientCompactionTaskGranularitySpec(null, Granularities.SECOND, null))
      .build();
  Pair<TaskStatus, List<DataSegment>> resultPair = runTask(compactionTask1);
  Assert.assertTrue(resultPair.lhs.isSuccess());
  List<DataSegment> segments = resultPair.rhs;
  Assert.assertEquals(3, segments.size());
  for (int i = 0; i < 3; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1),
        segments.get(i).getInterval()
    );
    Assert.assertEquals(
        getDefaultCompactionState(
            Granularities.HOUR,
            Granularities.SECOND,
            ImmutableList.of(Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1))
        ),
        segments.get(i).getLastCompactionState()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, 0, 2, (short) 1, (short) 1),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(0, 1), segments.get(i).getShardSpec());
    }
  }
}
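For reference, the three positional arguments of ClientCompactionTaskGranularitySpec, as inferred from the usages in these tests, are segmentGranularity, queryGranularity, and rollup; a null slot keeps whatever the input segments already have. A hedged sketch of the spec built above:

import org.apache.druid.client.indexing.ClientCompactionTaskGranularitySpec;
import org.apache.druid.java.util.common.granularity.Granularities;

// Arguments: (segmentGranularity, queryGranularity, rollup). Leaving
// segmentGranularity null keeps the hourly segments from the index task,
// while the query granularity is tightened to SECOND, matching the
// assertions in the test above.
final ClientCompactionTaskGranularitySpec spec =
    new ClientCompactionTaskGranularitySpec(null, Granularities.SECOND, null);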
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class CompactionTaskRunTest, method testRunIndexAndCompactForSameSegmentAtTheSameTime2.
@Test
public void testRunIndexAndCompactForSameSegmentAtTheSameTime2() throws Exception
{
  runIndexTask();
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);
  final CompactionTask compactionTask = builder
      .interval(Intervals.of("2014-01-01T00:00:00/2014-01-02T03:00:00"))
      .build();

  // Make sure that compactionTask becomes ready first, then the indexTask becomes ready, then compactionTask runs.
  final CountDownLatch indexTaskReadyLatch = new CountDownLatch(1);
  final CountDownLatch compactionTaskStartLatch = new CountDownLatch(1);
  final Future<Pair<TaskStatus, List<DataSegment>>> compactionFuture = exec.submit(
      () -> runTask(compactionTask, indexTaskReadyLatch, compactionTaskStartLatch)
  );
  final Future<Pair<TaskStatus, List<DataSegment>>> indexFuture = exec.submit(() -> {
    indexTaskReadyLatch.await();
    return runIndexTask(compactionTaskStartLatch, null, false);
  });

  Assert.assertTrue(indexFuture.get().lhs.isSuccess());
  List<DataSegment> segments = indexFuture.get().rhs;
  Assert.assertEquals(6, segments.size());
  for (int i = 0; i < 6; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i / 2, i / 2 + 1),
        segments.get(i).getInterval()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID + i % 2, 0, 2, (short) 1, (short) 2),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(i % 2, 2), segments.get(i).getShardSpec());
    }
  }

  // The index task won the race, so the compaction task must fail.
  final Pair<TaskStatus, List<DataSegment>> compactionResult = compactionFuture.get();
  Assert.assertEquals(TaskState.FAILED, compactionResult.lhs.getStatusCode());
}
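The two latches implement a simple ready/start handshake so the test can force an exact interleaving of the two tasks. A self-contained sketch of the same pattern with plain JDK types (the task bodies and names are illustrative, not the test's actual runTask logic):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class LatchHandshake
{
  public static void main(String[] args) throws Exception
  {
    final ExecutorService exec = Executors.newFixedThreadPool(2);
    final CountDownLatch indexTaskReadyLatch = new CountDownLatch(1);
    final CountDownLatch compactionTaskStartLatch = new CountDownLatch(1);

    exec.submit(() -> {
      // "Compaction task": announce readiness, then hold until the
      // index task has started before doing any work.
      indexTaskReadyLatch.countDown();
      compactionTaskStartLatch.await();
      System.out.println("compaction runs last");
      return null;
    });

    exec.submit(() -> {
      // "Index task": wait for the compaction task to be ready,
      // start, and release the compaction task.
      indexTaskReadyLatch.await();
      System.out.println("index task starts");
      compactionTaskStartLatch.countDown();
      return null;
    });

    exec.shutdown();
  }
}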
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class CompactionTaskRunTest, method testCompactionWithNewMetricInMetricsSpec.
@Test
public void testCompactionWithNewMetricInMetricsSpec() throws Exception
{
  runIndexTask();
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);

  // day segmentGranularity
  final CompactionTask compactionTask = builder
      .interval(Intervals.of("2014-01-01/2014-01-02"))
      .granularitySpec(new ClientCompactionTaskGranularitySpec(Granularities.DAY, null, null))
      .metricsSpec(new AggregatorFactory[]{
          new CountAggregatorFactory("cnt"),
          new LongSumAggregatorFactory("val", "val")
      })
      .build();
  Pair<TaskStatus, List<DataSegment>> resultPair = runTask(compactionTask);
  Assert.assertTrue(resultPair.lhs.isSuccess());
  List<DataSegment> segments = resultPair.rhs;
  Assert.assertEquals(1, segments.size());
  Assert.assertEquals(Intervals.of("2014-01-01/2014-01-02"), segments.get(0).getInterval());
  Assert.assertEquals(new NumberedShardSpec(0, 1), segments.get(0).getShardSpec());

  ObjectMapper mapper = new DefaultObjectMapper();
  Map<String, String> expectedCountMetric = new HashMap<>();
  expectedCountMetric.put("type", "count");
  expectedCountMetric.put("name", "cnt");
  Map<String, String> expectedLongSumMetric = new HashMap<>();
  expectedLongSumMetric.put("type", "longSum");
  expectedLongSumMetric.put("name", "val");
  expectedLongSumMetric.put("fieldName", "val");
  expectedLongSumMetric.put("expression", null);
  CompactionState expectedCompactionState = new CompactionState(
      new DynamicPartitionsSpec(5000000, Long.MAX_VALUE),
      new DimensionsSpec(DimensionsSpec.getDefaultSchemas(ImmutableList.of("ts", "dim"))),
      ImmutableList.of(expectedCountMetric, expectedLongSumMetric),
      getObjectMapper().readValue(getObjectMapper().writeValueAsString(compactionTask.getTransformSpec()), Map.class),
      mapper.readValue(mapper.writeValueAsString(new IndexSpec()), Map.class),
      mapper.readValue(
          mapper.writeValueAsString(
              new UniformGranularitySpec(
                  Granularities.DAY,
                  Granularities.MINUTE,
                  true,
                  ImmutableList.of(Intervals.of("2014-01-01T00:00:00/2014-01-01T03:00:00"))
              )
          ),
          Map.class
      )
  );
  Assert.assertEquals(expectedCompactionState, segments.get(0).getLastCompactionState());
}
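The hand-built expectedCountMetric and expectedLongSumMetric maps mirror the JSON shape Jackson produces for the aggregator factories; assuming that serialization shape (including the null expression entry, as the expectations above suggest), the same maps can be derived by round-tripping the factories through the mapper. A sketch:

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.query.aggregation.CountAggregatorFactory;
import org.apache.druid.query.aggregation.LongSumAggregatorFactory;
import java.util.Map;

final ObjectMapper mapper = new DefaultObjectMapper();
// Expected shape: {type=count, name=cnt}
final Map<String, Object> countAsMap =
    mapper.convertValue(new CountAggregatorFactory("cnt"), Map.class);
// Expected shape: {type=longSum, name=val, fieldName=val, expression=null}
final Map<String, Object> longSumAsMap =
    mapper.convertValue(new LongSumAggregatorFactory("val", "val"), Map.class);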
Use of org.apache.druid.timeline.partition.NumberedShardSpec in project druid by druid-io.
The class CompactionTaskRunTest, method testWithGranularitySpecNullQueryGranularityAndNullSegmentGranularity.
@Test
public void testWithGranularitySpecNullQueryGranularityAndNullSegmentGranularity() throws Exception
{
  runIndexTask();
  final Builder builder = new Builder(DATA_SOURCE, segmentCacheManagerFactory, RETRY_POLICY_FACTORY);

  // All three granularity slots are null: compaction keeps the input segments'
  // HOUR segment granularity and MINUTE query granularity.
  final CompactionTask compactionTask1 = builder
      .interval(Intervals.of("2014-01-01/2014-01-02"))
      .granularitySpec(new ClientCompactionTaskGranularitySpec(null, null, null))
      .build();
  Pair<TaskStatus, List<DataSegment>> resultPair = runTask(compactionTask1);
  Assert.assertTrue(resultPair.lhs.isSuccess());
  List<DataSegment> segments = resultPair.rhs;
  Assert.assertEquals(3, segments.size());
  for (int i = 0; i < 3; i++) {
    Assert.assertEquals(
        Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1),
        segments.get(i).getInterval()
    );
    Assert.assertEquals(
        getDefaultCompactionState(
            Granularities.HOUR,
            Granularities.MINUTE,
            ImmutableList.of(Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1))
        ),
        segments.get(i).getLastCompactionState()
    );
    if (lockGranularity == LockGranularity.SEGMENT) {
      Assert.assertEquals(
          new NumberedOverwriteShardSpec(PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, 0, 2, (short) 1, (short) 1),
          segments.get(i).getShardSpec()
      );
    } else {
      Assert.assertEquals(new NumberedShardSpec(0, 1), segments.get(i).getShardSpec());
    }
  }
}
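A note on the interval assertions used throughout these tests: Druid's Intervals.of has a format-string overload, so the loops build each hourly interval from the loop index. A tiny sketch of the pattern:

import org.apache.druid.java.util.common.Intervals;
import org.joda.time.Interval;

for (int i = 0; i < 3; i++) {
  // Produces 2014-01-01T00..T01, T01..T02, and T02..T03 as i runs over 0..2.
  final Interval hour = Intervals.of("2014-01-01T0%d:00:00/2014-01-01T0%d:00:00", i, i + 1);
  System.out.println(hour);
}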