Example usage of org.apache.druid.indexing.overlord.SegmentPublishResult in the druid project (druid-io): class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailDbNotNullWantNull.
@Test
public void testTransactionalAnnounceFailDbNotNullWantNull() throws IOException {
    // Seed the datasource metadata: start from nothing (null) and commit {"foo": "baz"}.
    final SegmentPublishResult firstPublish = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment),
        ImmutableSet.of(),
        new ObjectMetadata(null),
        new ObjectMetadata(ImmutableMap.of("foo", "baz"))
    );
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment)), firstPublish);

    // The second publish again claims the stored metadata is null, but the DB now holds
    // {"foo": "baz"}, so the transaction is expected to abort.
    final SegmentPublishResult secondPublish = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment2),
        ImmutableSet.of(),
        new ObjectMetadata(null),
        new ObjectMetadata(ImmutableMap.of("foo", "baz"))
    );
    Assert.assertEquals(SegmentPublishResult.fail("java.lang.RuntimeException: Aborting transaction!"), secondPublish);

    // Each call attempts the metadata update exactly once (no retry on this failure mode).
    Assert.assertEquals(2, metadataUpdateCounter.get());
}
Example usage of org.apache.druid.indexing.overlord.SegmentPublishResult in the druid project (druid-io): class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceSuccess.
@Test
public void testTransactionalAnnounceSuccess() throws IOException {
    // Publish the first segment, moving metadata from null to {"foo": "bar"}.
    final SegmentPublishResult firstPublish = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment),
        ImmutableSet.of(),
        new ObjectMetadata(null),
        new ObjectMetadata(ImmutableMap.of("foo", "bar"))
    );
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment)), firstPublish);
    final byte[] firstPayload = mapper.writeValueAsString(defaultSegment).getBytes(StandardCharsets.UTF_8);
    Assert.assertArrayEquals(
        firstPayload,
        derbyConnector.lookup(derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(), "id", "payload", defaultSegment.getId().toString())
    );

    // Publish the second segment, moving metadata from {"foo": "bar"} to {"foo": "baz"}.
    final SegmentPublishResult secondPublish = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment2),
        ImmutableSet.of(),
        new ObjectMetadata(ImmutableMap.of("foo", "bar")),
        new ObjectMetadata(ImmutableMap.of("foo", "baz"))
    );
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment2)), secondPublish);
    final byte[] secondPayload = mapper.writeValueAsString(defaultSegment2).getBytes(StandardCharsets.UTF_8);
    Assert.assertArrayEquals(
        secondPayload,
        derbyConnector.lookup(derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(), "id", "payload", defaultSegment2.getId().toString())
    );

    // The final committed metadata is the end state of the second publish.
    Assert.assertEquals(new ObjectMetadata(ImmutableMap.of("foo", "baz")), coordinator.retrieveDataSourceMetadata("fooDataSource"));

    // Each call attempts the metadata update exactly once.
    Assert.assertEquals(2, metadataUpdateCounter.get());
}
Example usage of org.apache.druid.indexing.overlord.SegmentPublishResult in the druid project (druid-io): class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailSegmentDropFailWithRetry.
@Test
public void testTransactionalAnnounceFailSegmentDropFailWithRetry() throws IOException {
    insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());

    // Include a segment that was never inserted; dropping it produces a retryable failure.
    DataSegment nonExistingSegment = defaultSegment4;
    Set<DataSegment> dropSegments = ImmutableSet.of(existingSegment1, nonExistingSegment);

    final SegmentPublishResult publishResult = coordinator.announceHistoricalSegments(SEGMENTS, dropSegments, null, null);
    Assert.assertEquals(SegmentPublishResult.fail("org.apache.druid.metadata.RetryTransactionException: Aborting transaction!"), publishResult);

    // The drop is retried up to the configured maximum before giving up.
    // NOTE(review): the constant name carries a pre-existing typo ("MEATADATA"); it is
    // declared elsewhere in this file, so it is referenced as-is here.
    Assert.assertEquals(MAX_SQL_MEATADATA_RETRY_FOR_TEST, segmentTableDropUpdateCounter.get());

    // Nothing was published or dropped; the used-segments table is unchanged.
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
}
Example usage of org.apache.druid.indexing.overlord.SegmentPublishResult in the druid project (druid-io): class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceSucceedWithSegmentDrop.
@Test
public void testTransactionalAnnounceSucceedWithSegmentDrop() throws IOException {
    insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());

    // Publish SEGMENTS and drop both pre-existing segments in the same transaction.
    final SegmentPublishResult publishResult = coordinator.announceHistoricalSegments(SEGMENTS, ImmutableSet.of(existingSegment1, existingSegment2), null, null);
    Assert.assertEquals(SegmentPublishResult.ok(SEGMENTS), publishResult);

    // Every published segment's serialized payload should be stored verbatim.
    for (DataSegment published : SEGMENTS) {
        final byte[] expectedPayload = mapper.writeValueAsString(published).getBytes(StandardCharsets.UTF_8);
        Assert.assertArrayEquals(
            expectedPayload,
            derbyConnector.lookup(derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(), "id", "payload", published.getId().toString())
        );
    }

    // Only the newly published segments remain marked as used.
    Assert.assertEquals(ImmutableList.of(defaultSegment.getId().toString(), defaultSegment2.getId().toString()), retrieveUsedSegmentIds());
}
Example usage of org.apache.druid.indexing.overlord.SegmentPublishResult in the druid project (druid-io): class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailSegmentDropFailWithoutRetry.
@Test
public void testTransactionalAnnounceFailSegmentDropFailWithoutRetry() throws IOException {
    insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());

    // A segment from a different datasource ("bar") is mixed into the drop set.
    DataSegment dataSegmentBar = DataSegment.builder()
        .dataSource("bar")
        .interval(Intervals.of("2001/P1D"))
        .shardSpec(new LinearShardSpec(1))
        .version("b")
        .size(0)
        .build();
    Set<DataSegment> dropSegments = ImmutableSet.of(existingSegment1, existingSegment2, dataSegmentBar);

    final SegmentPublishResult publishResult = coordinator.announceHistoricalSegments(SEGMENTS, dropSegments, null, null);
    Assert.assertEquals(SegmentPublishResult.fail("java.lang.RuntimeException: Aborting transaction!"), publishResult);

    // The drop is attempted only once: dropSegmentsWithHandle returns FAILURE (not TRY_AGAIN)
    // because the set of segments to drop spans more than one datasource.
    Assert.assertEquals(1, segmentTableDropUpdateCounter.get());

    // Nothing was published or dropped; the used-segments table is unchanged.
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
}
Aggregations