Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
The class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailSegmentDropFailWithoutRetry.
@Test
public void testTransactionalAnnounceFailSegmentDropFailWithoutRetry() throws IOException {
  insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
  Assert.assertEquals(
      ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()),
      retrieveUsedSegmentIds()
  );
  DataSegment dataSegmentBar = DataSegment.builder()
      .dataSource("bar")
      .interval(Intervals.of("2001/P1D"))
      .shardSpec(new LinearShardSpec(1))
      .version("b")
      .size(0)
      .build();
  Set<DataSegment> dropSegments = ImmutableSet.of(existingSegment1, existingSegment2, dataSegmentBar);
  final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(SEGMENTS, dropSegments, null, null);
  Assert.assertEquals(SegmentPublishResult.fail("java.lang.RuntimeException: Aborting transaction!"), result1);
  // The drop should be attempted only once: dropSegmentsWithHandle returns FAILURE (not TRY_AGAIN)
  // because the set of segments to drop spans more than one datasource.
  Assert.assertEquals(1, segmentTableDropUpdateCounter.get());
  Assert.assertEquals(
      ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()),
      retrieveUsedSegmentIds()
  );
}
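For orientation: the drop fails without retry because the drop set mixes the two existing segments with a segment from a different datasource ("bar"). A minimal sketch of that single-datasource invariant, using a hypothetical helper (DropSetChecks and isSingleDatasource are illustrative names, not Druid's actual implementation):

import java.util.Set;
import java.util.stream.Collectors;
import org.apache.druid.timeline.DataSegment;

final class DropSetChecks {
  // True only when every segment in the set belongs to a single datasource;
  // a mixed set corresponds to the FAILURE (no retry) case asserted above.
  static boolean isSingleDatasource(Set<DataSegment> segments) {
    return segments.stream()
                   .map(DataSegment::getDataSource)
                   .collect(Collectors.toSet())
                   .size() <= 1;
  }
}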
Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
The class ActionBasedUsedSegmentCheckerTest, method testBasic.
@Test
public void testBasic() throws IOException {
  final TaskActionClient taskActionClient = EasyMock.createMock(TaskActionClient.class);
  EasyMock.expect(taskActionClient.submit(new RetrieveUsedSegmentsAction("bar", Intervals.of("2002/P1D"), null, Segments.ONLY_VISIBLE)))
          .andReturn(ImmutableList.of(
              DataSegment.builder().dataSource("bar").interval(Intervals.of("2002/P1D")).shardSpec(new LinearShardSpec(0)).version("b").size(0).build(),
              DataSegment.builder().dataSource("bar").interval(Intervals.of("2002/P1D")).shardSpec(new LinearShardSpec(1)).version("b").size(0).build()
          ));
  EasyMock.expect(taskActionClient.submit(new RetrieveUsedSegmentsAction("foo", null, ImmutableList.of(Intervals.of("2000/P1D"), Intervals.of("2001/P1D")), Segments.ONLY_VISIBLE)))
          .andReturn(ImmutableList.of(
              DataSegment.builder().dataSource("foo").interval(Intervals.of("2000/P1D")).shardSpec(new LinearShardSpec(0)).version("a").size(0).build(),
              DataSegment.builder().dataSource("foo").interval(Intervals.of("2000/P1D")).shardSpec(new LinearShardSpec(1)).version("a").size(0).build(),
              DataSegment.builder().dataSource("foo").interval(Intervals.of("2001/P1D")).shardSpec(new LinearShardSpec(1)).version("b").size(0).build(),
              DataSegment.builder().dataSource("foo").interval(Intervals.of("2002/P1D")).shardSpec(new LinearShardSpec(1)).version("b").size(0).build()
          ));
  EasyMock.replay(taskActionClient);
  final UsedSegmentChecker checker = new ActionBasedUsedSegmentChecker(taskActionClient);
  final Set<DataSegment> segments = checker.findUsedSegments(ImmutableSet.of(
      new SegmentIdWithShardSpec("foo", Intervals.of("2000/P1D"), "a", new LinearShardSpec(1)),
      new SegmentIdWithShardSpec("foo", Intervals.of("2001/P1D"), "b", new LinearShardSpec(0)),
      new SegmentIdWithShardSpec("bar", Intervals.of("2002/P1D"), "b", new LinearShardSpec(0))
  ));
  Assert.assertEquals(
      ImmutableSet.of(
          DataSegment.builder().dataSource("foo").interval(Intervals.of("2000/P1D")).shardSpec(new LinearShardSpec(1)).version("a").size(0).build(),
          DataSegment.builder().dataSource("bar").interval(Intervals.of("2002/P1D")).shardSpec(new LinearShardSpec(0)).version("b").size(0).build()
      ),
      segments
  );
  EasyMock.verify(taskActionClient);
}
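The match is exact on datasource, interval, version, and shard spec: of the three requested identifiers, only the two corresponding to returned used segments come back. A short hedged demo (ShardSpecIdDemo is an illustrative class name) of how two segments that differ only in their LinearShardSpec partition number carry distinct segment IDs:

import org.apache.druid.java.util.common.Intervals;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.LinearShardSpec;

public class ShardSpecIdDemo {
  public static void main(String[] args) {
    DataSegment partition0 = DataSegment.builder().dataSource("foo").interval(Intervals.of("2000/P1D")).version("a").shardSpec(new LinearShardSpec(0)).size(0).build();
    DataSegment partition1 = DataSegment.builder().dataSource("foo").interval(Intervals.of("2000/P1D")).version("a").shardSpec(new LinearShardSpec(1)).size(0).build();
    // The two IDs differ only in the trailing partition number, so lookups
    // keyed by segment ID distinguish the shards.
    System.out.println(partition0.getId());
    System.out.println(partition1.getId());
  }
}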
Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
The class SequenceMetadataTest, method testPublishAnnotatedSegmentsThrowExceptionIfDropSegmentsNotNullAndNotEmpty.
@Test
public void testPublishAnnotatedSegmentsThrowExceptionIfDropSegmentsNotNullAndNotEmpty() throws Exception {
  DataSegment dataSegment = DataSegment.builder()
      .dataSource("foo")
      .interval(Intervals.of("2001/P1D"))
      .shardSpec(new LinearShardSpec(1))
      .version("b")
      .size(0)
      .build();
  Set<DataSegment> notNullNotEmptySegment = ImmutableSet.of(dataSegment);
  SequenceMetadata<Integer, Integer> sequenceMetadata =
      new SequenceMetadata<>(1, "test", ImmutableMap.of(), ImmutableMap.of(), true, ImmutableSet.of());
  TransactionalSegmentPublisher transactionalSegmentPublisher =
      sequenceMetadata.createPublisher(mockSeekableStreamIndexTaskRunner, mockTaskToolbox, true);
  expectedException.expect(ISE.class);
  expectedException.expectMessage(
      "Stream ingestion task unexpectedly attempted to drop segments: "
      + SegmentUtils.commaSeparatedIdentifiers(notNullNotEmptySegment)
  );
  transactionalSegmentPublisher.publishAnnotatedSegments(null, notNullNotEmptySegment, ImmutableSet.of(), null);
}
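The behavior under test can be pictured as a simple guard: stream ingestion never drops segments, so a non-null, non-empty drop set is treated as a bug. A hedged sketch of such a guard (StreamPublishGuards and rejectDroppedSegments are hypothetical names, not SequenceMetadata's actual code, though the message format is the one asserted above):

import java.util.Set;
import org.apache.druid.java.util.common.ISE;
import org.apache.druid.segment.SegmentUtils;
import org.apache.druid.timeline.DataSegment;

final class StreamPublishGuards {
  static void rejectDroppedSegments(Set<DataSegment> segmentsToDrop) {
    if (segmentsToDrop != null && !segmentsToDrop.isEmpty()) {
      // Same message shape the test expects, built from the segment IDs.
      throw new ISE(
          "Stream ingestion task unexpectedly attempted to drop segments: %s",
          SegmentUtils.commaSeparatedIdentifiers(segmentsToDrop)
      );
    }
  }
}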
Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
The class TDigestSketchSqlAggregatorTest, method createQuerySegmentWalker.
@Override
public SpecificSegmentsQuerySegmentWalker createQuerySegmentWalker() throws IOException {
  TDigestSketchModule.registerSerde();
  final QueryableIndex index = IndexBuilder.create(CalciteTests.getJsonMapper())
      .tmpDir(temporaryFolder.newFolder())
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder()
          .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new TDigestSketchAggregatorFactory("qsketch_m1", "m1", 128))
          .withRollup(false)
          .build())
      .rows(CalciteTests.ROWS1)
      .buildMMappedIndex();
  return new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
      DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(index.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      index
  );
}
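The LinearShardSpec(0) here marks the index as the sole shard of the test datasource. A small hedged sketch of what that shard spec looks like when serialized (DefaultObjectMapper is Druid's Jackson mapper; the JSON shape in the comment is the expected output, not verified here):

import org.apache.druid.jackson.DefaultObjectMapper;
import org.apache.druid.timeline.partition.LinearShardSpec;

public class LinearShardSpecJsonDemo {
  public static void main(String[] args) throws Exception {
    DefaultObjectMapper mapper = new DefaultObjectMapper();
    // Expected to print something like {"type":"linear","partitionNum":0}.
    System.out.println(mapper.writeValueAsString(new LinearShardSpec(0)));
  }
}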
Use of org.apache.druid.timeline.partition.LinearShardSpec in project druid by druid-io.
The class QuantileSqlAggregatorTest, method createQuerySegmentWalker.
@Override
public SpecificSegmentsQuerySegmentWalker createQuerySegmentWalker() throws IOException {
  ApproximateHistogramDruidModule.registerSerde();
  final QueryableIndex index = IndexBuilder.create(CalciteTests.getJsonMapper())
      .tmpDir(temporaryFolder.newFolder())
      .segmentWriteOutMediumFactory(OffHeapMemorySegmentWriteOutMediumFactory.instance())
      .schema(new IncrementalIndexSchema.Builder()
          .withMetrics(new CountAggregatorFactory("cnt"), new DoubleSumAggregatorFactory("m1", "m1"), new ApproximateHistogramAggregatorFactory("hist_m1", "m1", null, null, null, null, false))
          .withRollup(false)
          .build())
      .rows(CalciteTests.ROWS1)
      .buildMMappedIndex();
  return new SpecificSegmentsQuerySegmentWalker(conglomerate).add(
      DataSegment.builder().dataSource(CalciteTests.DATASOURCE1).interval(index.getDataInterval()).version("1").shardSpec(new LinearShardSpec(0)).size(0).build(),
      index
  );
}
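Both createQuerySegmentWalker overrides follow the same pattern: register the extension's serde, build a one-segment index, and expose it through the walker under LinearShardSpec(0). A hedged generic helper capturing that shared final step (WalkerHelpers and addSingleSegment are illustrative names, not part of Druid's API):

import org.apache.druid.segment.QueryableIndex;
import org.apache.druid.sql.calcite.util.SpecificSegmentsQuerySegmentWalker;
import org.apache.druid.timeline.DataSegment;
import org.apache.druid.timeline.partition.LinearShardSpec;

final class WalkerHelpers {
  // Registers the index as the single linear shard of the datasource,
  // using the index's own data interval as the segment interval.
  static SpecificSegmentsQuerySegmentWalker addSingleSegment(
      SpecificSegmentsQuerySegmentWalker walker,
      String dataSource,
      QueryableIndex index
  ) {
    return walker.add(
        DataSegment.builder()
            .dataSource(dataSource)
            .interval(index.getDataInterval())
            .version("1")
            .shardSpec(new LinearShardSpec(0))
            .size(0)
            .build(),
        index
    );
  }
}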