Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.
The class SegmentTransactionalInsertAction, method perform.
/**
 * Performs some sanity checks and publishes the given segments.
 */
@Override
public SegmentPublishResult perform(Task task, TaskActionToolbox toolbox)
{
  final SegmentPublishResult retVal;
  if (segments.isEmpty()) {
    // The task created no segments, but still needs to update metadata
    // with the progress that the task made.
    try {
      retVal = toolbox.getIndexerMetadataStorageCoordinator().commitMetadataOnly(dataSource, startMetadata, endMetadata);
    }
    catch (Exception e) {
      throw new RuntimeException(e);
    }
    return retVal;
  }

  final Set<DataSegment> allSegments = new HashSet<>(segments);
  if (segmentsToBeOverwritten != null) {
    allSegments.addAll(segmentsToBeOverwritten);
  }
  if (segmentsToBeDropped != null) {
    allSegments.addAll(segmentsToBeDropped);
  }
  TaskLocks.checkLockCoversSegments(task, toolbox.getTaskLockbox(), allSegments);

  if (segmentsToBeOverwritten != null && !segmentsToBeOverwritten.isEmpty()) {
    final List<TaskLock> locks = toolbox.getTaskLockbox().findLocksForTask(task);
    // Sanity check that the new segments are allowed to overwrite the old segments.
    if (locks.get(0).getGranularity() == LockGranularity.SEGMENT) {
      checkWithSegmentLock();
    }
  }

  try {
    retVal = toolbox.getTaskLockbox().doInCriticalSection(
        task,
        allSegments.stream().map(DataSegment::getInterval).collect(Collectors.toList()),
        CriticalAction.<SegmentPublishResult>builder()
            .onValidLocks(
                () -> toolbox.getIndexerMetadataStorageCoordinator()
                             .announceHistoricalSegments(segments, segmentsToBeDropped, startMetadata, endMetadata)
            )
            .onInvalidLocks(
                () -> SegmentPublishResult.fail(
                    "Invalid task locks. Maybe they are revoked by a higher priority task."
                    + " Please check the overlord log for details."
                )
            )
            .build()
    );
  }
  catch (Exception e) {
    throw new RuntimeException(e);
  }

  // Emit metrics
  final ServiceMetricEvent.Builder metricBuilder = new ServiceMetricEvent.Builder();
  IndexTaskUtils.setTaskDimensions(metricBuilder, task);
  if (retVal.isSuccess()) {
    toolbox.getEmitter().emit(metricBuilder.build("segment/txn/success", 1));
  } else {
    toolbox.getEmitter().emit(metricBuilder.build("segment/txn/failure", 1));
  }

  // getSegments() should return an empty set if announceHistoricalSegments() failed
  for (DataSegment segment : retVal.getSegments()) {
    metricBuilder.setDimension(DruidMetrics.INTERVAL, segment.getInterval().toString());
    metricBuilder.setDimension(DruidMetrics.PARTITIONING_TYPE, segment.getShardSpec() == null ? null : segment.getShardSpec().getType());
    toolbox.getEmitter().emit(metricBuilder.build("segment/added/bytes", segment.getSize()));
  }
  return retVal;
}
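From the calling side, a task would not invoke perform() directly; it submits the action through its TaskActionClient. The following is a minimal sketch, not taken from the original page: the helper name publishTransactionally, the segment set, and the ObjectMetadata values are illustrative placeholders.

// Sketch: how a task might submit this action via its action client (illustrative).
private void publishTransactionally(TaskToolbox toolbox, Set<DataSegment> newSegments) throws IOException
{
  final SegmentTransactionalInsertAction action = SegmentTransactionalInsertAction.appendAction(
      newSegments,
      new ObjectMetadata(ImmutableList.of(1)), // startMetadata: the state the store is expected to hold now
      new ObjectMetadata(ImmutableList.of(2))  // endMetadata: the state the store should hold after the commit
  );
  final SegmentPublishResult result = toolbox.getTaskActionClient().submit(action);
  if (!result.isSuccess()) {
    // The compare-and-swap on datasource metadata failed, or the locks were revoked.
    throw new ISE("Transactional publish failed: %s", result.getErrorMsg());
  }
}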
Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.
The class AppenderatorDriverRealtimeIndexTaskTest, method makeToolboxFactory.
private void makeToolboxFactory(final File directory)
{
  taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  publishedSegments = new CopyOnWriteArrayList<>();
  ObjectMapper mapper = new DefaultObjectMapper();
  mapper.registerSubtypes(LinearShardSpec.class);
  mapper.registerSubtypes(NumberedShardSpec.class);
  IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(
      mapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      derbyConnectorRule.getConnector()
  ) {
    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException
    {
      Set<DataSegment> result = super.announceHistoricalSegments(segments);
      Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
      publishedSegments.addAll(result);
      segments.forEach(s -> segmentLatch.countDown());
      return result;
    }

    @Override
    public SegmentPublishResult announceHistoricalSegments(Set<DataSegment> segments, Set<DataSegment> segmentsToDrop, DataSourceMetadata startMetadata, DataSourceMetadata endMetadata) throws IOException
    {
      SegmentPublishResult result = super.announceHistoricalSegments(segments, segmentsToDrop, startMetadata, endMetadata);
      Assert.assertFalse("Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null);
      publishedSegments.addAll(result.getSegments());
      result.getSegments().forEach(s -> segmentLatch.countDown());
      return result;
    }
  };
  taskLockbox = new TaskLockbox(taskStorage, mdc);
  final TaskConfig taskConfig = new TaskConfig(directory.getPath(), null, null, 50000, null, true, null, null, null, false, false, TaskConfig.BATCH_PROCESSING_MODE_DEFAULT.name());
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, taskStorage, mdc, EMITTER, EasyMock.createMock(SupervisorManager.class));
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox, new TaskAuditLogConfig(false));
  final QueryRunnerFactoryConglomerate conglomerate = new DefaultQueryRunnerFactoryConglomerate(
      ImmutableMap.of(TimeseriesQuery.class, new TimeseriesQueryRunnerFactory(
          new TimeseriesQueryQueryToolChest(),
          new TimeseriesQueryEngine(),
          (query, future) -> {
            // do nothing
          }
      ))
  );
  handOffCallbacks = new ConcurrentHashMap<>();
  final SegmentHandoffNotifierFactory handoffNotifierFactory = dataSource -> new SegmentHandoffNotifier() {
    @Override
    public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable)
    {
      handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
      handoffLatch.countDown();
      return true;
    }

    @Override
    public void start()
    {
      // Noop
    }

    @Override
    public void close()
    {
      // Noop
    }
  };
  final TestUtils testUtils = new TestUtils();
  taskToolboxFactory = new TaskToolboxFactory(
      taskConfig,
      new DruidNode("druid/middlemanager", "localhost", false, 8091, null, true, false),
      taskActionClientFactory,
      EMITTER,
      new TestDataSegmentPusher(),
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      EasyMock.createNiceMock(DataSegmentServerAnnouncer.class),
      handoffNotifierFactory,
      () -> conglomerate,
      DirectQueryProcessingPool.INSTANCE, // queryExecutorService
      NoopJoinableFactory.INSTANCE,
      () -> EasyMock.createMock(MonitorScheduler.class),
      new SegmentCacheManagerFactory(testUtils.getTestObjectMapper()),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      new CachePopulatorStats(),
      testUtils.getTestIndexMergerV9(),
      EasyMock.createNiceMock(DruidNodeAnnouncer.class),
      EasyMock.createNiceMock(DruidNode.class),
      new LookupNodeService("tier"),
      new DataNodeService("tier", 1000, ServerType.INDEXER_EXECUTOR, 0),
      new SingleFileTaskReportFileWriter(reportsFile),
      null,
      AuthTestUtils.TEST_AUTHORIZER_MAPPER,
      new NoopChatHandlerProvider(),
      testUtils.getRowIngestionMetersFactory(),
      new TestAppenderatorsManager(),
      new NoopIndexingServiceClient(),
      null,
      null,
      null
  );
}
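The overridden announceHistoricalSegments methods above assert that segmentLatch was initialized first. The companion helpers are not shown on this page; the following is a plausible sketch implied by the assertion message and the countDown() calls, and the exact implementation in the test class may differ.

// Plausible companion helpers (assumed, based on the assertion message above).
private void expectPublishedSegments(int count)
{
  // One latch tick per segment the test expects to see published.
  segmentLatch = new CountDownLatch(count);
}

private Collection<DataSegment> awaitSegments() throws InterruptedException
{
  // Block until the overridden coordinator has counted down once per published segment.
  Assert.assertTrue(
      "Timed out waiting for segments to be published",
      segmentLatch.await(60, TimeUnit.SECONDS)
  );
  return publishedSegments;
}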
Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.
The class SegmentTransactionalInsertActionTest, method testFailBadVersion.
@Test
public void testFailBadVersion() throws Exception {
  final Task task = NoopTask.create();
  final SegmentTransactionalInsertAction action =
      SegmentTransactionalInsertAction.overwriteAction(null, null, ImmutableSet.of(SEGMENT3));
  actionTestKit.getTaskLockbox().add(task);
  acquireTimeChunkLock(TaskLockType.EXCLUSIVE, task, INTERVAL, 5000);

  thrown.expect(IllegalStateException.class);
  thrown.expectMessage(CoreMatchers.containsString("are not covered by locks"));
  // perform() is expected to throw here, so the test never reaches the assertion below.
  SegmentPublishResult result = action.perform(task, actionTestKit.getTaskActionToolbox());
  Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(SEGMENT3)), result);
}
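These tests lean on an acquireTimeChunkLock helper that is not reproduced on this page. A sketch consistent with Druid's TaskLockbox and TimeChunkLockRequest API is below; the signature and body are assumptions, not the test's actual code.

// Assumed sketch of the lock-acquisition helper used by these tests:
// request an exclusive time-chunk lock from the lockbox and fail fast if it is not granted.
private void acquireTimeChunkLock(TaskLockType lockType, Task task, Interval interval, long timeoutMs)
    throws InterruptedException
{
  final LockResult lockResult = actionTestKit.getTaskLockbox().lock(
      task,
      new TimeChunkLockRequest(lockType, task, interval, null),
      timeoutMs
  );
  Assert.assertTrue("Could not acquire lock for interval " + interval, lockResult.isOk());
}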
Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.
The class SegmentTransactionalInsertActionTest, method testFailTransactionalDropSegment.
@Test
public void testFailTransactionalDropSegment() throws Exception {
  final Task task = NoopTask.create();
  actionTestKit.getTaskLockbox().add(task);
  acquireTimeChunkLock(TaskLockType.EXCLUSIVE, task, INTERVAL, 5000);
  SegmentPublishResult result = SegmentTransactionalInsertAction.overwriteAction(
      null,
      // SEGMENT1 does not exist, hence will fail to drop
      ImmutableSet.of(SEGMENT1),
      ImmutableSet.of(SEGMENT2)
  ).perform(task, actionTestKit.getTaskActionToolbox());
  Assert.assertEquals(SegmentPublishResult.fail("org.apache.druid.metadata.RetryTransactionException: Aborting transaction!"), result);
}
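The equality assertion works because the two factory methods produce structurally different results: ok(...) carries the published segments with success == true, while fail(...) carries an empty segment set plus the error message. A quick illustration, based only on the factory methods already shown on this page:

// Illustration of the two SegmentPublishResult factory methods compared in these tests.
SegmentPublishResult ok = SegmentPublishResult.ok(ImmutableSet.of(SEGMENT2));
SegmentPublishResult failed = SegmentPublishResult.fail("Aborting transaction!");

// ok: success is true and getSegments() echoes the published set.
Assert.assertTrue(ok.isSuccess() && ok.getSegments().contains(SEGMENT2));
// failed: success is false and getSegments() is empty
// (matching the comment in perform() earlier on this page).
Assert.assertTrue(!failed.isSuccess() && failed.getSegments().isEmpty());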
Use of org.apache.druid.indexing.overlord.SegmentPublishResult in project druid by druid-io.
The class SegmentTransactionalInsertActionTest, method testTransactionalUpdateDataSourceMetadata.
@Test
public void testTransactionalUpdateDataSourceMetadata() throws Exception {
  final Task task = NoopTask.create();
  actionTestKit.getTaskLockbox().add(task);
  acquireTimeChunkLock(TaskLockType.EXCLUSIVE, task, INTERVAL, 5000);
  SegmentPublishResult result1 = SegmentTransactionalInsertAction
      .appendAction(ImmutableSet.of(SEGMENT1), new ObjectMetadata(null), new ObjectMetadata(ImmutableList.of(1)))
      .perform(task, actionTestKit.getTaskActionToolbox());
  Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(SEGMENT1)), result1);
  SegmentPublishResult result2 = SegmentTransactionalInsertAction
      .appendAction(ImmutableSet.of(SEGMENT2), new ObjectMetadata(ImmutableList.of(1)), new ObjectMetadata(ImmutableList.of(2)))
      .perform(task, actionTestKit.getTaskActionToolbox());
  Assert.assertEquals(SegmentPublishResult.ok(ImmutableSet.of(SEGMENT2)), result2);
  Assertions.assertThat(
      actionTestKit.getMetadataStorageCoordinator().retrieveUsedSegmentsForInterval(DATA_SOURCE, INTERVAL, Segments.ONLY_VISIBLE)
  ).containsExactlyInAnyOrder(SEGMENT1, SEGMENT2);
  Assert.assertEquals(
      new ObjectMetadata(ImmutableList.of(2)),
      actionTestKit.getMetadataStorageCoordinator().retrieveDataSourceMetadata(DATA_SOURCE)
  );
}
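The test above chains metadata in compare-and-swap fashion: each publish only succeeds when its startMetadata matches what the store currently holds. A hypothetical third step, not part of the original test, shows the failure side of that contract under the same assumptions:

// Hypothetical continuation: the store now holds ObjectMetadata([2]), so a publish
// with a stale startMetadata of ObjectMetadata([1]) should fail the transactional check.
SegmentPublishResult stale = SegmentTransactionalInsertAction
    .appendAction(ImmutableSet.of(SEGMENT1), new ObjectMetadata(ImmutableList.of(1)), new ObjectMetadata(ImmutableList.of(3)))
    .perform(task, actionTestKit.getTaskActionToolbox());
Assert.assertFalse(stale.isSuccess());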