
Example 1 with SegmentPublishResult

Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailDbNotNullWantNull:

@Test
public void testTransactionalAnnounceFailDbNotNullWantNull() throws IOException {
    final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment),
        ImmutableSet.of(),
        new ObjectMetadata(null),
        new ObjectMetadata(ImmutableMap.of("foo", "baz"))
    );
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment)), result1);
    final SegmentPublishResult result2 = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment2),
        ImmutableSet.of(),
        new ObjectMetadata(null),
        new ObjectMetadata(ImmutableMap.of("foo", "baz"))
    );
    Assert.assertEquals(SegmentPublishResult.fail("java.lang.RuntimeException: Aborting transaction!"), result2);
    // Should only be tried once per call.
    Assert.assertEquals(2, metadataUpdateCounter.get());
}
Also used: SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult), ObjectMetadata (org.apache.druid.indexing.overlord.ObjectMetadata), Test (org.junit.Test)
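
The failure here comes from the compare-and-swap semantics of the transactional announce: the third argument is the metadata the caller expects the store to hold (start), the fourth is the metadata to write (end). The second call passes a null start while the store already holds {"foo": "baz"}, so the transaction aborts and nothing is committed. Below is a minimal caller-side sketch of handling the result; it assumes accessors such as isSuccess() and getErrorMsg() on SegmentPublishResult and hypothetical variables segmentsToPublish, expectedStart, and newEnd, so treat it as an illustration rather than the test's own code.

// Sketch only: segmentsToPublish, expectedStart and newEnd are hypothetical,
// and isSuccess()/getErrorMsg() are assumed accessors on SegmentPublishResult.
final SegmentPublishResult publishResult = coordinator.announceHistoricalSegments(
    segmentsToPublish,   // segments to insert as used
    ImmutableSet.of(),   // nothing to drop
    expectedStart,       // metadata the store must currently hold, e.g. new ObjectMetadata(null)
    newEnd               // metadata to write if the check passes
);
if (!publishResult.isSuccess()) {
    // The stored metadata did not match expectedStart; the whole transaction rolled back.
    throw new RuntimeException("Transactional publish failed: " + publishResult.getErrorMsg());
}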

Example 2 with SegmentPublishResult

Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceSuccess:

@Test
public void testTransactionalAnnounceSuccess() throws IOException {
    // Insert first segment.
    final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment),
        ImmutableSet.of(),
        new ObjectMetadata(null),
        new ObjectMetadata(ImmutableMap.of("foo", "bar"))
    );
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment)), result1);
    Assert.assertArrayEquals(
        mapper.writeValueAsString(defaultSegment).getBytes(StandardCharsets.UTF_8),
        derbyConnector.lookup(
            derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
            "id",
            "payload",
            defaultSegment.getId().toString()
        )
    );
    // Insert second segment.
    final SegmentPublishResult result2 = coordinator.announceHistoricalSegments(
        ImmutableSet.of(defaultSegment2),
        ImmutableSet.of(),
        new ObjectMetadata(ImmutableMap.of("foo", "bar")),
        new ObjectMetadata(ImmutableMap.of("foo", "baz"))
    );
    Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(defaultSegment2)), result2);
    Assert.assertArrayEquals(
        mapper.writeValueAsString(defaultSegment2).getBytes(StandardCharsets.UTF_8),
        derbyConnector.lookup(
            derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
            "id",
            "payload",
            defaultSegment2.getId().toString()
        )
    );
    // Examine metadata.
    Assert.assertEquals(new ObjectMetadata(ImmutableMap.of("foo", "baz")), coordinator.retrieveDataSourceMetadata("fooDataSource"));
    // Should only be tried once per call.
    Assert.assertEquals(2, metadataUpdateCounter.get());
}
Also used: SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult), ObjectMetadata (org.apache.druid.indexing.overlord.ObjectMetadata), Test (org.junit.Test)
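
The success path shows the chaining rule that makes the publish transactional: the end metadata committed by one call must be supplied as the start metadata of the next. A small sketch of that pattern follows, assuming the same coordinator API as above and that retrieveDataSourceMetadata returns the committed DataSourceMetadata (check the exact types against your Druid version):

// Sketch: read back the committed metadata and use it as the start of the next publish.
// Assumes DataSourceMetadata (org.apache.druid.indexing.overlord.DataSourceMetadata).
final DataSourceMetadata committed = coordinator.retrieveDataSourceMetadata("fooDataSource");
final SegmentPublishResult next = coordinator.announceHistoricalSegments(
    ImmutableSet.of(defaultSegment2),
    ImmutableSet.of(),
    committed,                                          // start: what the store holds now
    new ObjectMetadata(ImmutableMap.of("foo", "baz"))   // end: new metadata to commit
);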

Example 3 with SegmentPublishResult

Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailSegmentDropFailWithRetry:

@Test
public void testTransactionalAnnounceFailSegmentDropFailWithRetry() throws IOException {
    insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
    DataSegment nonExistingSegment = defaultSegment4;
    Set<DataSegment> dropSegments = ImmutableSet.of(existingSegment1, nonExistingSegment);
    final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(SEGMENTS, dropSegments, null, null);
    Assert.assertEquals(SegmentPublishResult.fail("org.apache.druid.metadata.RetryTransactionException: Aborting transaction!"), result1);
    Assert.assertEquals(MAX_SQL_MEATADATA_RETRY_FOR_TEST, segmentTableDropUpdateCounter.get());
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
}
Also used: SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult), DataSegment (org.apache.druid.timeline.DataSegment), Test (org.junit.Test)
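
The drop fails because defaultSegment4 is not in the used-segments table; the coordinator treats that as a retryable conflict (RetryTransactionException), retries up to MAX_SQL_MEATADATA_RETRY_FOR_TEST times, and leaves the table unchanged. For readers without the test fixture, here is a hypothetical construction of a segment like existingSegment1, mirroring the DataSegment.builder() usage in Example 5 (the dataSource, interval, and version values are assumptions):

// Hypothetical fixture sketch; values are illustrative, not the test's actual fields.
final DataSegment existingSegment1 = DataSegment.builder()
    .dataSource("fooDataSource")
    .interval(Intervals.of("2015-01-01/2015-01-02"))
    .version("version")
    .shardSpec(new LinearShardSpec(0))
    .size(0)
    .build();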

Example 4 with SegmentPublishResult

Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceSucceedWithSegmentDrop:

@Test
public void testTransactionalAnnounceSucceedWithSegmentDrop() throws IOException {
    insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
    final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(
        SEGMENTS,
        ImmutableSet.of(existingSegment1, existingSegment2),
        null,
        null
    );
    Assert.assertEquals(SegmentPublishResult.ok(SEGMENTS), result1);
    for (DataSegment segment : SEGMENTS) {
        Assert.assertArrayEquals(
            mapper.writeValueAsString(segment).getBytes(StandardCharsets.UTF_8),
            derbyConnector.lookup(
                derbyConnectorRule.metadataTablesConfigSupplier().get().getSegmentsTable(),
                "id",
                "payload",
                segment.getId().toString()
            )
        );
    }
    Assert.assertEquals(ImmutableList.of(defaultSegment.getId().toString(), defaultSegment2.getId().toString()), retrieveUsedSegmentIds());
}
Also used: SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult), DataSegment (org.apache.druid.timeline.DataSegment), Test (org.junit.Test)
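
This is the atomic replace pattern (for example after compaction or reindexing): the new segments become used and the old ones are dropped in one transaction, so readers never observe a half-published state. A sketch under the same API, with hypothetical newSegments and oldSegments sets:

// Sketch of an atomic replace; newSegments and oldSegments are hypothetical sets.
// Passing null for both metadata arguments skips the metadata compare-and-swap, as in the test.
final SegmentPublishResult replaceResult = coordinator.announceHistoricalSegments(
    newSegments,   // segments that should become used
    oldSegments,   // previously used segments to drop in the same transaction
    null,          // no start metadata to compare
    null           // no end metadata to write
);
Assert.assertEquals(SegmentPublishResult.ok(newSegments), replaceResult);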

Example 5 with SegmentPublishResult

Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.

From the class IndexerSQLMetadataStorageCoordinatorTest, method testTransactionalAnnounceFailSegmentDropFailWithoutRetry:

@Test
public void testTransactionalAnnounceFailSegmentDropFailWithoutRetry() throws IOException {
    insertUsedSegments(ImmutableSet.of(existingSegment1, existingSegment2));
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
    DataSegment dataSegmentBar = DataSegment.builder()
        .dataSource("bar")
        .interval(Intervals.of("2001/P1D"))
        .shardSpec(new LinearShardSpec(1))
        .version("b")
        .size(0)
        .build();
    Set<DataSegment> dropSegments = ImmutableSet.of(existingSegment1, existingSegment2, dataSegmentBar);
    final SegmentPublishResult result1 = coordinator.announceHistoricalSegments(SEGMENTS, dropSegments, null, null);
    Assert.assertEquals(SegmentPublishResult.fail("java.lang.RuntimeException: Aborting transaction!"), result1);
    // Should only be tried once, since dropSegmentsWithHandle will return FAILURE (not TRY_AGAIN)
    // when the set of segments to drop contains more than one datasource.
    Assert.assertEquals(1, segmentTableDropUpdateCounter.get());
    Assert.assertEquals(ImmutableList.of(existingSegment1.getId().toString(), existingSegment2.getId().toString()), retrieveUsedSegmentIds());
}
Also used: SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult), LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec), DataSegment (org.apache.druid.timeline.DataSegment), Test (org.junit.Test)
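
Here the drop set spans two datasources ("bar" plus the existing one), which the coordinator treats as a definitive failure rather than a retryable one, so the drop is attempted only once. A caller can catch this earlier with a guard such as the following sketch, which uses plain java.util.stream and is not part of the Druid API:

// Caller-side guard (sketch): ensure all segments to drop belong to a single datasource.
final Set<String> dropDataSources = dropSegments.stream()
    .map(DataSegment::getDataSource)
    .collect(java.util.stream.Collectors.toSet());
if (dropDataSources.size() > 1) {
    throw new IllegalArgumentException("Segments to drop span multiple datasources: " + dropDataSources);
}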

Aggregations

SegmentPublishResult (org.apache.druid.indexing.overlord.SegmentPublishResult): 17
Test (org.junit.Test): 14
ObjectMetadata (org.apache.druid.indexing.overlord.ObjectMetadata): 7
DataSegment (org.apache.druid.timeline.DataSegment): 7
NoopTask (org.apache.druid.indexing.common.task.NoopTask): 5
Task (org.apache.druid.indexing.common.task.Task): 5
IOException (java.io.IOException): 3
HashSet (java.util.HashSet): 2
Set (java.util.Set): 2
LinearShardSpec (org.apache.druid.timeline.partition.LinearShardSpec): 2
Handle (org.skife.jdbi.v2.Handle): 2
JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException): 1
TypeReference (com.fasterxml.jackson.core.type.TypeReference): 1
ObjectMapper (com.fasterxml.jackson.databind.ObjectMapper): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableMap (com.google.common.collect.ImmutableMap): 1
ImmutableSet (com.google.common.collect.ImmutableSet): 1
Iterables (com.google.common.collect.Iterables): 1
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 1
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 1