Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, the method testAddNumberedShardSpecAfterSingleDimensionsShardSpecWithUnknownCorePartitionSize verifies that a numbered partition cannot be appended to an interval whose existing range-partitioned segments carry no core partition count.
@Test
public void testAddNumberedShardSpecAfterSingleDimensionsShardSpecWithUnknownCorePartitionSize() throws IOException
{
  final String datasource = "datasource";
  final Interval interval = Intervals.of("2020-01-01/P1D");
  final String version = "version";
  final List<String> dimensions = ImmutableList.of("dim");
  final List<String> metrics = ImmutableList.of("met");
  final Set<DataSegment> originalSegments = new HashSet<>();
  for (int i = 0; i < 6; i++) {
    final String start = i == 0 ? null : String.valueOf(i - 1);
    final String end = i == 5 ? null : String.valueOf(i);
    originalSegments.add(new DataSegment(
        datasource, interval, version, ImmutableMap.of(), dimensions, metrics,
        // A null numCorePartitions emulates shardSpecs created in older versions of Druid.
        new SingleDimensionShardSpec("dim", start, end, i, null),
        9, 10L
    ));
  }
  coordinator.announceHistoricalSegments(originalSegments);
  final SegmentIdWithShardSpec id = coordinator.allocatePendingSegment(
      datasource, "seq", null, interval, NumberedPartialShardSpec.instance(), version, false
  );
  // Allocation fails: the existing shardSpecs do not say how many core partitions the interval has.
  Assert.assertNull(id);
}
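For contrast, here is a minimal sketch, not part of the original test, of the shardSpec a newer Druid version would write in the same loop, with the core partition count set explicitly to the six partitions built above. With segments like these the coordinator should have enough information to place an appended numbered partition rather than returning null.

  // Sketch only: same constructor as above, but with an explicit numCorePartitions (6,
  // matching the six core partitions the loop creates).
  new SingleDimensionShardSpec("dim", start, end, i, 6)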
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, the method testAllocatePendingSegmentsWithOvershadowingSegments verifies that when successive generations overwrite the same root partition range, only the newest generation remains visible.
@Test
public void testAllocatePendingSegmentsWithOvershadowingSegments() throws IOException
{
  final String dataSource = "ds";
  final Interval interval = Intervals.of("2017-01-01/2017-02-01");
  String prevSegmentId = null;
  for (int i = 0; i < 10; i++) {
    final SegmentIdWithShardSpec identifier = coordinator.allocatePendingSegment(
        dataSource, "seq", prevSegmentId, interval,
        new NumberedOverwritePartialShardSpec(0, 1, (short) (i + 1)), "version", false);
    Assert.assertEquals(
        StringUtils.format("ds_2017-01-01T00:00:00.000Z_2017-02-01T00:00:00.000Z_version%s",
                           "_" + (i + PartitionIds.NON_ROOT_GEN_START_PARTITION_ID)),
        identifier.toString());
    prevSegmentId = identifier.toString();
    final Set<DataSegment> toBeAnnounced = Collections.singleton(new DataSegment(
        identifier.getDataSource(), identifier.getInterval(), identifier.getVersion(), null,
        Collections.emptyList(), Collections.emptyList(),
        ((NumberedOverwriteShardSpec) identifier.getShardSpec()).withAtomicUpdateGroupSize(1),
        0, 10L));
    final Set<DataSegment> announced = coordinator.announceHistoricalSegments(toBeAnnounced);
    Assert.assertEquals(toBeAnnounced, announced);
  }
  final Collection<DataSegment> visibleSegments =
      coordinator.retrieveUsedSegmentsForInterval(dataSource, interval, Segments.ONLY_VISIBLE);
  Assert.assertEquals(1, visibleSegments.size());
  Assert.assertEquals(
      new DataSegment(dataSource, interval, "version", null,
          Collections.emptyList(), Collections.emptyList(),
          new NumberedOverwriteShardSpec(
              9 + PartitionIds.NON_ROOT_GEN_START_PARTITION_ID, 0, 1, (short) 9, (short) 1),
          0, 10L),
      Iterables.getOnlyElement(visibleSegments));
}
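A hedged follow-up, not in the original test, reusing the test's local names: retrieving with Segments.INCLUDING_OVERSHADOWED instead of Segments.ONLY_VISIBLE should surface all ten generations, since overshadowed segments remain in the used set until they are explicitly marked unused.

  // Sketch only: overshadowed generations are still used, just no longer visible.
  final Collection<DataSegment> allUsed = coordinator.retrieveUsedSegmentsForInterval(
      dataSource, interval, Segments.INCLUDING_OVERSHADOWED);
  Assert.assertEquals(10, allUsed.size());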
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, the method testAnnounceHistoricalSegments verifies that announced segments are written to the segments table verbatim and reported as used, without touching dataSource metadata.
@Test
public void testAnnounceHistoricalSegments() throws IOException
{
  Set<DataSegment> segments = new HashSet<>();
  for (int i = 0; i < 105; i++) {
    segments.add(new DataSegment(
        "fooDataSource", Intervals.of("2015-01-01T00Z/2015-01-02T00Z"), "version",
        ImmutableMap.of(), ImmutableList.of("dim1"), ImmutableList.of("m1"),
        new LinearShardSpec(i), 9, 100));
  }
  coordinator.announceHistoricalSegments(segments);
  for (DataSegment segment : segments) {
    Assert.assertArrayEquals(
        mapper.writeValueAsString(segment).getBytes(StandardCharsets.UTF_8),
        derbyConnector.lookup(
            derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
            "id", "payload", segment.getId().toString()));
  }
  List<String> segmentIds = segments.stream()
                                    .map(segment -> segment.getId().toString())
                                    .collect(Collectors.toList());
  segmentIds.sort(Comparator.naturalOrder());
  Assert.assertEquals(segmentIds, retrieveUsedSegmentIds());
  // Should not update dataSource metadata.
  Assert.assertEquals(0, metadataUpdateCounter.get());
}
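The payload check above compares raw serialized bytes. A sketch of an alternative check, not in the original test and reusing the loop's local names: round-trip the stored payload through the test's Jackson mapper and confirm it deserializes back to the same segment ID, which tolerates byte-level differences in serialization.

  // Sketch only: deserialize the stored payload instead of comparing raw bytes.
  final byte[] payload = derbyConnector.lookup(
      derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
      "id", "payload", segment.getId().toString());
  Assert.assertEquals(segment.getId(), mapper.readValue(payload, DataSegment.class).getId());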
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, the method testTransactionalAnnounceFailSegmentDropFailWithRetry verifies that a publish-and-drop transaction retries and then rolls back when one of the segments to drop does not exist in the used set.
@Test
public void testTransactionalAnnounceFailSegmentDropFailWithRetry() throws IOException
{
  insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
  Assert.assertEquals(
      ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()),
      retrieveUsedSegmentIds());
  // The drop set includes a segment that was never published, so the drop cannot succeed.
  DataSegment nonExistingSegment = defaultSegment4;
  Set<DataSegment> dropSegments = ImmutableSet.of(existingSegment1, nonExistingSegment);
  final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(SEGMENTS, dropSegments, null, null);
  Assert.assertEquals(
      SegmentPublishResult.fail("org.apache.druid.metadata.RetryTransactionException: Aborting transaction!"),
      result1);
  // The drop is attempted the configured maximum number of times before giving up...
  Assert.assertEquals(MAX_SQL_MEATADATA_RETRY_FOR_TEST, segmentTableDropUpdateCounter.get());
  // ...and the rollback leaves the used-segment set unchanged.
  Assert.assertEquals(
      ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()),
      retrieveUsedSegmentIds());
}
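For readers unfamiliar with the four-argument overload used above, here is its call shape with the arguments annotated. The parameter roles are inferred from how this test and the next one use them, so treat the labels as descriptive rather than authoritative.

  // Annotated sketch of the overload used in this test.
  coordinator.announceHistoricalSegments(
      SEGMENTS,      // segments to publish
      dropSegments,  // segments to atomically mark unused in the same transaction
      null,          // start dataSource metadata (null: no compare step)
      null           // end dataSource metadata (null: no metadata update)
  );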
Use of org.apache.druid.timeline.DataSegment in project druid by druid-io.
In the class IndexerSQLMetadataStorageCoordinatorTest, the method testTransactionalAnnounceSucceedWithSegmentDrop verifies that new segments can be published and existing ones dropped in a single atomic transaction.
@Test
public void testTransactionalAnnounceSucceedWithSegmentDrop() throws IOException
{
  insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
  Assert.assertEquals(
      ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()),
      retrieveUsedSegmentIds());
  final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
      SEGMENTS, ImmutableSet.of(existingSegment1, existingSegment2), null, null);
  Assert.assertEquals(SegmentPublishResult.ok(SEGMENTS), result1);
  for (DataSegment segment : SEGMENTS) {
    Assert.assertArrayEquals(
        mapper.writeValueAsString(segment).getBytes(StandardCharsets.UTF_8),
        derbyConnector.lookup(
            derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
            "id", "payload", segment.getId().toString()));
  }
  Assert.assertEquals(
      ImmutableList.of(defaultSegment.getId().toString(), defaultSegment2.getId().toString()),
      retrieveUsedSegmentIds());
}
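The final assertion above already implies the swap: once the transaction commits, the used set contains only the newly published segments. A sketch of a more explicit check, hypothetical and not in the original test, reusing the test's fixture names:

  // Sketch only: the atomically dropped segments are no longer reported as used.
  final List<String> usedIds = retrieveUsedSegmentIds();
  Assert.assertFalse(usedIds.contains(existingSegment1.getId().toString()));
  Assert.assertFalse(usedIds.contains(existingSegment2.getId().toString()));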