Use of io.druid.indexing.common.config.TaskStorageConfig in project druid by druid-io.
From the class TaskLifecycleTest, method setUpTaskStorage:
private TaskStorage setUpTaskStorage()
{
  Preconditions.checkNotNull(mapper);
  Preconditions.checkNotNull(derbyConnectorRule);
  TaskStorage taskStorage;
  switch (taskStorageType) {
    case HEAP_TASK_STORAGE: {
      taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {});
      break;
    }
    case METADATA_TASK_STORAGE: {
      TestDerbyConnector testDerbyConnector = derbyConnectorRule.getConnector();
      mapper.registerSubtypes(
          new NamedType(MockExceptionalFirehoseFactory.class, "mockExcepFirehoseFactory"),
          new NamedType(MockFirehoseFactory.class, "mockFirehoseFactory")
      );
      testDerbyConnector.createTaskTables();
      testDerbyConnector.createSegmentTable();
      taskStorage = new MetadataTaskStorage(
          testDerbyConnector,
          new TaskStorageConfig(null),
          new SQLMetadataStorageActionHandlerFactory(testDerbyConnector, derbyConnectorRule.metadataTablesConfigSupplier().get(), mapper)
      );
      break;
    }
    default: {
      throw new RuntimeException(String.format("Unknown task storage type [%s]", taskStorageType));
    }
  }
  tsqa = new TaskStorageQueryAdapter(taskStorage);
  return taskStorage;
}
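Every usage on this page passes null to the TaskStorageConfig constructor, which falls back to the default retention threshold for recently finished task metadata. As a minimal sketch, assuming the constructor accepts a single Joda-Time Period for that threshold and that HeapMemoryTaskStorage lives in io.druid.indexing.overlord (neither detail is shown in the snippet above), an explicit value could be supplied like this:

import io.druid.indexing.common.config.TaskStorageConfig;
import io.druid.indexing.overlord.HeapMemoryTaskStorage;
import io.druid.indexing.overlord.TaskStorage;
import org.joda.time.Period;

public class TaskStorageConfigSketch
{
  // Hypothetical helper for tests: keep recently finished task metadata for one hour
  // rather than relying on whatever default a null argument implies.
  public static TaskStorage oneHourRetentionStorage()
  {
    return new HeapMemoryTaskStorage(new TaskStorageConfig(new Period("PT1H")));
  }
}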
Use of io.druid.indexing.common.config.TaskStorageConfig in project druid by druid-io.
From the class OverlordTest, method setUp:
@Before
public void setUp() throws Exception
{
  req = EasyMock.createStrictMock(HttpServletRequest.class);
  supervisorManager = EasyMock.createMock(SupervisorManager.class);
  taskLockbox = EasyMock.createStrictMock(TaskLockbox.class);
  taskLockbox.syncFromStorage();
  EasyMock.expectLastCall().atLeastOnce();
  taskLockbox.add(EasyMock.<Task>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  taskLockbox.remove(EasyMock.<Task>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  // for second Noop Task directly added to deep storage.
  taskLockbox.add(EasyMock.<Task>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  taskLockbox.remove(EasyMock.<Task>anyObject());
  EasyMock.expectLastCall().atLeastOnce();
  taskActionClientFactory = EasyMock.createStrictMock(TaskActionClientFactory.class);
  EasyMock.expect(taskActionClientFactory.create(EasyMock.<Task>anyObject())).andReturn(null).anyTimes();
  EasyMock.replay(taskLockbox, taskActionClientFactory);
  taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  runTaskCountDownLatches = new CountDownLatch[2];
  runTaskCountDownLatches[0] = new CountDownLatch(1);
  runTaskCountDownLatches[1] = new CountDownLatch(1);
  taskCompletionCountDownLatches = new CountDownLatch[2];
  taskCompletionCountDownLatches[0] = new CountDownLatch(1);
  taskCompletionCountDownLatches[1] = new CountDownLatch(1);
  announcementLatch = new CountDownLatch(1);
  IndexerZkConfig indexerZkConfig = new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null, null);
  setupServerAndCurator();
  curator.start();
  curator.blockUntilConnected();
  curator.create().creatingParentsIfNeeded().forPath(indexerZkConfig.getLeaderLatchPath());
  druidNode = new DruidNode("hey", "what", 1234);
  ServiceEmitter serviceEmitter = new NoopServiceEmitter();
  taskMaster = new TaskMaster(
      new TaskQueueConfig(null, new Period(1), null, new Period(10)),
      taskLockbox,
      taskStorage,
      taskActionClientFactory,
      druidNode,
      indexerZkConfig,
      new TaskRunnerFactory<MockTaskRunner>()
      {
        @Override
        public MockTaskRunner build()
        {
          return new MockTaskRunner(runTaskCountDownLatches, taskCompletionCountDownLatches);
        }
      },
      curator,
      new NoopServiceAnnouncer()
      {
        @Override
        public void announce(DruidNode node)
        {
          announcementLatch.countDown();
        }
      },
      new CoordinatorOverlordServiceConfig(null, null),
      serviceEmitter,
      supervisorManager,
      EasyMock.createNiceMock(OverlordHelperManager.class)
  );
  EmittingLogger.registerEmitter(serviceEmitter);
}
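The Overlord test drives tasks through the TaskMaster and its queue, but the heap-backed storage it builds can also be exercised directly. A minimal sketch, assuming the TaskStorage interface of this Druid version exposes insert and getStatus, and that NoopTask.create and TaskStatus.running behave as in other indexing-service tests (none of which is confirmed by the snippet above):

// Hypothetical usage sketch, not part of OverlordTest itself.
// insert is assumed to throw a checked exception, so call this from a test method
// that declares throws Exception.
Task noopTask = NoopTask.create();
taskStorage.insert(noopTask, TaskStatus.running(noopTask.getId()));
Optional<TaskStatus> status = taskStorage.getStatus(noopTask.getId());
Assert.assertTrue(status.isPresent());
Assert.assertTrue(status.get().isRunnable());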
Use of io.druid.indexing.common.config.TaskStorageConfig in project druid by druid-io.
From the class KafkaIndexTaskTest, method makeToolboxFactory:
private void makeToolboxFactory() throws IOException
{
  directory = tempFolder.newFolder();
  final TestUtils testUtils = new TestUtils();
  final ObjectMapper objectMapper = testUtils.getTestObjectMapper();
  for (Module module : new KafkaIndexTaskModule().getJacksonModules()) {
    objectMapper.registerModule(module);
  }
  final TaskConfig taskConfig = new TaskConfig(new File(directory, "taskBaseDir").getPath(), null, null, 50000, null, false, null, null);
  final TestDerbyConnector derbyConnector = derby.getConnector();
  derbyConnector.createDataSourceTable();
  derbyConnector.createPendingSegmentsTable();
  derbyConnector.createSegmentTable();
  derbyConnector.createRulesTable();
  derbyConnector.createConfigTable();
  derbyConnector.createTaskTables();
  derbyConnector.createAuditTable();
  taskStorage = new MetadataTaskStorage(
      derbyConnector,
      new TaskStorageConfig(null),
      new SQLMetadataStorageActionHandlerFactory(derbyConnector, derby.metadataTablesConfigSupplier().get(), objectMapper)
  );
  metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(
      testUtils.getTestObjectMapper(),
      derby.metadataTablesConfigSupplier().get(),
      derbyConnector
  );
  taskLockbox = new TaskLockbox(taskStorage);
  final TaskActionToolbox taskActionToolbox = new TaskActionToolbox(taskLockbox, metadataStorageCoordinator, emitter, new SupervisorManager(null));
  final TaskActionClientFactory taskActionClientFactory = new LocalTaskActionClientFactory(taskStorage, taskActionToolbox);
  final SegmentHandoffNotifierFactory handoffNotifierFactory = new SegmentHandoffNotifierFactory()
  {
    @Override
    public SegmentHandoffNotifier createSegmentHandoffNotifier(String dataSource)
    {
      return new SegmentHandoffNotifier()
      {
        @Override
        public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable)
        {
          if (doHandoff) {
            // Simulate immediate handoff
            exec.execute(handOffRunnable);
          }
          return true;
        }

        @Override
        public void start()
        {
          // Noop
        }

        @Override
        public void close()
        {
          // Noop
        }
      };
    }
  };
  final LocalDataSegmentPusherConfig dataSegmentPusherConfig = new LocalDataSegmentPusherConfig();
  dataSegmentPusherConfig.storageDirectory = getSegmentDirectory();
  final DataSegmentPusher dataSegmentPusher = new LocalDataSegmentPusher(dataSegmentPusherConfig, objectMapper);
  toolboxFactory = new TaskToolboxFactory(
      taskConfig,
      taskActionClientFactory,
      emitter,
      dataSegmentPusher,
      new TestDataSegmentKiller(),
      null, // DataSegmentMover
      null, // DataSegmentArchiver
      new TestDataSegmentAnnouncer(),
      handoffNotifierFactory,
      makeTimeseriesOnlyConglomerate(),
      MoreExecutors.sameThreadExecutor(), // queryExecutorService
      EasyMock.createMock(MonitorScheduler.class),
      new SegmentLoaderFactory(
          new SegmentLoaderLocalCacheManager(null, new SegmentLoaderConfig()
          {
            @Override
            public List<StorageLocationConfig> getLocations()
            {
              return Lists.newArrayList();
            }
          }, testUtils.getTestObjectMapper())
      ),
      testUtils.getTestObjectMapper(),
      testUtils.getTestIndexMerger(),
      testUtils.getTestIndexIO(),
      MapCache.create(1024),
      new CacheConfig(),
      testUtils.getTestIndexMergerV9()
  );
}
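Because TaskStorageConfig is built from configuration, the same objectMapper could in principle deserialize it from JSON instead of calling the constructor directly. A minimal sketch, assuming the Jackson property is named recentlyFinishedThreshold and accepts an ISO-8601 period string (both are assumptions; neither is confirmed by the code shown here):

// Hypothetical alternative to new TaskStorageConfig(null) above; the property name
// "recentlyFinishedThreshold" and the period literal are assumptions.
TaskStorageConfig configFromJson = objectMapper.readValue(
    "{\"recentlyFinishedThreshold\":\"PT2H\"}",
    TaskStorageConfig.class
);
taskStorage = new MetadataTaskStorage(
    derbyConnector,
    configFromJson,
    new SQLMetadataStorageActionHandlerFactory(derbyConnector, derby.metadataTablesConfigSupplier().get(), objectMapper)
);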
Use of io.druid.indexing.common.config.TaskStorageConfig in project druid by druid-io.
From the class IngestSegmentFirehoseFactoryTest, method constructorFeeder:
@Parameterized.Parameters(name = "{1}")
public static Collection<Object[]> constructorFeeder() throws IOException
{
  final IndexSpec indexSpec = new IndexSpec();
  final HeapMemoryTaskStorage ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {});
  final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
      .withQueryGranularity(Granularities.NONE)
      .withMinTimestamp(JodaUtils.MIN_INSTANT)
      .withDimensionsSpec(ROW_PARSER)
      .withMetrics(new AggregatorFactory[] {
          new LongSumAggregatorFactory(METRIC_LONG_NAME, DIM_LONG_NAME),
          new DoubleSumAggregatorFactory(METRIC_FLOAT_NAME, DIM_FLOAT_NAME)
      })
      .build();
  final OnheapIncrementalIndex index = new OnheapIncrementalIndex(schema, true, MAX_ROWS * MAX_SHARD_NUMBER);
  for (Integer i = 0; i < MAX_ROWS; ++i) {
    index.add(ROW_PARSER.parse(buildRow(i.longValue())));
  }
  if (!persistDir.mkdirs() && !persistDir.exists()) {
    throw new IOException(String.format("Could not create directory at [%s]", persistDir.getAbsolutePath()));
  }
  INDEX_MERGER.persist(index, persistDir, indexSpec);
  final TaskLockbox tl = new TaskLockbox(ts);
  final IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(null, null, null)
  {
    private final Set<DataSegment> published = Sets.newHashSet();
    private final Set<DataSegment> nuked = Sets.newHashSet();

    @Override
    public List<DataSegment> getUsedSegmentsForInterval(String dataSource, Interval interval) throws IOException
    {
      return ImmutableList.copyOf(segmentSet);
    }

    @Override
    public List<DataSegment> getUsedSegmentsForIntervals(String dataSource, List<Interval> interval) throws IOException
    {
      return ImmutableList.copyOf(segmentSet);
    }

    @Override
    public List<DataSegment> getUnusedSegmentsForInterval(String dataSource, Interval interval)
    {
      return ImmutableList.of();
    }

    @Override
    public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments)
    {
      Set<DataSegment> added = Sets.newHashSet();
      for (final DataSegment segment : segments) {
        if (published.add(segment)) {
          added.add(segment);
        }
      }
      return ImmutableSet.copyOf(added);
    }

    @Override
    public void deleteSegments(Set<DataSegment> segments)
    {
      nuked.addAll(segments);
    }
  };
  final LocalTaskActionClientFactory tac = new LocalTaskActionClientFactory(
      ts,
      new TaskActionToolbox(tl, mdc, newMockEmitter(), EasyMock.createMock(SupervisorManager.class))
  );
  SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
  EasyMock.replay(notifierFactory);
  final TaskToolboxFactory taskToolboxFactory = new TaskToolboxFactory(
      new TaskConfig(tmpDir.getAbsolutePath(), null, null, 50000, null, false, null, null),
      tac,
      newMockEmitter(),
      new DataSegmentPusher()
      {
        @Deprecated
        @Override
        public String getPathForHadoop(String dataSource)
        {
          return getPathForHadoop();
        }

        @Override
        public String getPathForHadoop()
        {
          throw new UnsupportedOperationException();
        }

        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException
        {
          return segment;
        }
      },
      new DataSegmentKiller()
      {
        @Override
        public void kill(DataSegment segments) throws SegmentLoadingException
        {
        }

        @Override
        public void killAll() throws IOException
        {
          throw new UnsupportedOperationException("not implemented");
        }
      },
      new DataSegmentMover()
      {
        @Override
        public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) throws SegmentLoadingException
        {
          return dataSegment;
        }
      },
      new DataSegmentArchiver()
      {
        @Override
        public DataSegment archive(DataSegment segment) throws SegmentLoadingException
        {
          return segment;
        }

        @Override
        public DataSegment restore(DataSegment segment) throws SegmentLoadingException
        {
          return segment;
        }
      },
      null, // segment announcer
      notifierFactory,
      null, // query runner factory conglomerate corporation unionized collective
      null, // query executor service
      null, // monitor scheduler
      new SegmentLoaderFactory(new SegmentLoaderLocalCacheManager(null, new SegmentLoaderConfig()
      {
        @Override
        public List<StorageLocationConfig> getLocations()
        {
          return Lists.newArrayList();
        }
      }, MAPPER)),
      MAPPER,
      INDEX_MERGER,
      INDEX_IO,
      null,
      null,
      INDEX_MERGER_V9
  );
  Collection<Object[]> values = new LinkedList<>();
  for (InputRowParser parser : Arrays.<InputRowParser>asList(
      ROW_PARSER,
      new MapInputRowParser(new JSONParseSpec(
          new TimestampSpec(TIME_COLUMN, "auto", null),
          new DimensionsSpec(
              DimensionsSpec.getDefaultSchemas(ImmutableList.<String>of()),
              ImmutableList.of(DIM_FLOAT_NAME, DIM_LONG_NAME),
              ImmutableList.<SpatialDimensionSchema>of()
          ),
          null,
          null
      ))
  )) {
    for (List<String> dim_names : Arrays.<List<String>>asList(null, ImmutableList.of(DIM_NAME))) {
      for (List<String> metric_names : Arrays.<List<String>>asList(null, ImmutableList.of(METRIC_LONG_NAME, METRIC_FLOAT_NAME))) {
        values.add(new Object[] {
            new IngestSegmentFirehoseFactory(
                DATA_SOURCE_NAME,
                FOREVER,
                new SelectorDimFilter(DIM_NAME, DIM_VALUE, null),
                dim_names,
                metric_names,
                Guice.createInjector(new Module()
                {
                  @Override
                  public void configure(Binder binder)
                  {
                    binder.bind(TaskToolboxFactory.class).toInstance(taskToolboxFactory);
                  }
                }),
                INDEX_IO
            ),
            String.format(
                "DimNames[%s]MetricNames[%s]ParserDimNames[%s]",
                dim_names == null ? "null" : "dims",
                metric_names == null ? "null" : "metrics",
                parser == ROW_PARSER ? "dims" : "null"
            ),
            parser
        });
      }
    }
  }
  return values;
}
Use of io.druid.indexing.common.config.TaskStorageConfig in project druid by druid-io.
From the class RealtimeIndexTaskTest, method testRestoreAfterHandoffAttemptDuringShutdown:
@Test(timeout = 60_000L)
public void testRestoreAfterHandoffAttemptDuringShutdown() throws Exception
{
  final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
  final File directory = tempFolder.newFolder();
  final RealtimeIndexTask task1 = makeRealtimeTask(null);
  final DataSegment publishedSegment;

  // First run:
  {
    final TaskToolbox taskToolbox = makeToolbox(task1, taskStorage, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
    // Wait for the firehose to show up; it starts off null.
    while (task1.getFirehose() == null) {
      Thread.sleep(50);
    }
    final TestFirehose firehose = (TestFirehose) task1.getFirehose();
    firehose.addRows(
        ImmutableList.<InputRow>of(
            new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))
        )
    );
    // Stop the firehose; this will trigger a finishJob.
    firehose.close();
    // Wait for publish.
    while (mdc.getPublished().isEmpty()) {
      Thread.sleep(50);
    }
    publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
    // Do a query.
    Assert.assertEquals(1, sumMetric(task1, "rows"));
    // Trigger graceful shutdown.
    task1.stopGracefully();
    // Wait for the task to finish. The status doesn't really matter.
    while (!statusFuture.isDone()) {
      Thread.sleep(50);
    }
  }

  // Second run:
  {
    final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
    final TaskToolbox taskToolbox = makeToolbox(task2, taskStorage, mdc, directory);
    final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
    // Wait for the firehose to show up; it starts off null.
    while (task2.getFirehose() == null) {
      Thread.sleep(50);
    }
    // Stop the firehose again; this triggers another finishJob and starts another handoff.
    final TestFirehose firehose = (TestFirehose) task2.getFirehose();
    firehose.close();
    // publishedSegment is still published. No reason it shouldn't be.
    Assert.assertEquals(ImmutableSet.of(publishedSegment), mdc.getPublished());
    // Wait for a handoffCallback to show up.
    while (handOffCallbacks.isEmpty()) {
      Thread.sleep(50);
    }
    // Simulate handoff.
    for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
      final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
      Assert.assertEquals(
          new SegmentDescriptor(
              publishedSegment.getInterval(),
              publishedSegment.getVersion(),
              publishedSegment.getShardSpec().getPartitionNum()
          ),
          entry.getKey()
      );
      executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
    }
    handOffCallbacks.clear();
    // Wait for the task to finish.
    final TaskStatus taskStatus = statusFuture.get();
    Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
  }
}
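The restore in this test works only because both runs share the same task id, the same heap-backed TaskStorage, and the same on-disk directory. Reduced to a sketch that reuses only the helpers appearing in the test above:

// What the two runs have in common: task2 reuses task1's id, and both toolboxes are
// built against the same storage, metadata coordinator, and directory, which is what
// lets task2 pick up the state task1 persisted during its graceful shutdown.
final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
final File directory = tempFolder.newFolder();
final RealtimeIndexTask task1 = makeRealtimeTask(null);
final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
final TaskToolbox toolbox1 = makeToolbox(task1, taskStorage, mdc, directory);
final TaskToolbox toolbox2 = makeToolbox(task2, taskStorage, mdc, directory);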