
Example 1 with HeapMemoryTaskStorage

Use of io.druid.indexing.overlord.HeapMemoryTaskStorage in project druid by druid-io.

From class OverlordTest, method setUp.

@Before
public void setUp() throws Exception {
    req = EasyMock.createStrictMock(HttpServletRequest.class);
    supervisorManager = EasyMock.createMock(SupervisorManager.class);
    taskLockbox = EasyMock.createStrictMock(TaskLockbox.class);
    taskLockbox.syncFromStorage();
    EasyMock.expectLastCall().atLeastOnce();
    taskLockbox.add(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    taskLockbox.remove(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    // expectations for the second NoopTask, which is added directly to task storage
    taskLockbox.add(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    taskLockbox.remove(EasyMock.<Task>anyObject());
    EasyMock.expectLastCall().atLeastOnce();
    taskActionClientFactory = EasyMock.createStrictMock(TaskActionClientFactory.class);
    EasyMock.expect(taskActionClientFactory.create(EasyMock.<Task>anyObject())).andReturn(null).anyTimes();
    EasyMock.replay(taskLockbox, taskActionClientFactory);
    taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    runTaskCountDownLatches = new CountDownLatch[2];
    runTaskCountDownLatches[0] = new CountDownLatch(1);
    runTaskCountDownLatches[1] = new CountDownLatch(1);
    taskCompletionCountDownLatches = new CountDownLatch[2];
    taskCompletionCountDownLatches[0] = new CountDownLatch(1);
    taskCompletionCountDownLatches[1] = new CountDownLatch(1);
    announcementLatch = new CountDownLatch(1);
    IndexerZkConfig indexerZkConfig = new IndexerZkConfig(new ZkPathsConfig(), null, null, null, null, null);
    setupServerAndCurator();
    curator.start();
    curator.blockUntilConnected();
    curator.create().creatingParentsIfNeeded().forPath(indexerZkConfig.getLeaderLatchPath());
    druidNode = new DruidNode("hey", "what", 1234);
    ServiceEmitter serviceEmitter = new NoopServiceEmitter();
    taskMaster = new TaskMaster(
        new TaskQueueConfig(null, new Period(1), null, new Period(10)),
        taskLockbox,
        taskStorage,
        taskActionClientFactory,
        druidNode,
        indexerZkConfig,
        new TaskRunnerFactory<MockTaskRunner>() {

        @Override
        public MockTaskRunner build() {
            return new MockTaskRunner(runTaskCountDownLatches, taskCompletionCountDownLatches);
        }
        },
        curator,
        new NoopServiceAnnouncer() {

        @Override
        public void announce(DruidNode node) {
            announcementLatch.countDown();
        }
        },
        new CoordinatorOverlordServiceConfig(null, null),
        serviceEmitter,
        supervisorManager,
        EasyMock.createNiceMock(OverlordHelperManager.class));
    EmittingLogger.registerEmitter(serviceEmitter);
}
Also used: IndexerZkConfig (io.druid.server.initialization.IndexerZkConfig), NoopServiceEmitter (io.druid.server.metrics.NoopServiceEmitter), ServiceEmitter (com.metamx.emitter.service.ServiceEmitter), Task (io.druid.indexing.common.task.Task), NoopTask (io.druid.indexing.common.task.NoopTask), CoordinatorOverlordServiceConfig (io.druid.server.coordinator.CoordinatorOverlordServiceConfig), TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig), HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage), TaskActionClientFactory (io.druid.indexing.common.actions.TaskActionClientFactory), Period (org.joda.time.Period), CountDownLatch (java.util.concurrent.CountDownLatch), HttpServletRequest (javax.servlet.http.HttpServletRequest), SupervisorManager (io.druid.indexing.overlord.supervisor.SupervisorManager), ZkPathsConfig (io.druid.server.initialization.ZkPathsConfig), TaskLockbox (io.druid.indexing.overlord.TaskLockbox), TaskQueueConfig (io.druid.indexing.overlord.config.TaskQueueConfig), DruidNode (io.druid.server.DruidNode), TaskMaster (io.druid.indexing.overlord.TaskMaster), NoopServiceAnnouncer (io.druid.curator.discovery.NoopServiceAnnouncer), TaskRunnerFactory (io.druid.indexing.overlord.TaskRunnerFactory), Before (org.junit.Before)
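The setup above boils down to handing TaskMaster a fully in-memory TaskStorage, so no metadata store is needed. As a minimal, hedged sketch (not taken from the project; it assumes the TaskStorage methods insert, getTask, and getStatus of this Druid version, Guava's Optional as the lookup return type, and that insert may declare a checked exception for duplicate ids):

// Seed an in-memory task storage the way these tests do.
void seedStorage() throws Exception {
    TaskStorage storage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    Task task = NoopTask.create(); // NoopTask factory used throughout Druid's tests
    storage.insert(task, TaskStatus.running(task.getId())); // may throw on a duplicate task id
    // Lookups are served entirely from heap; nothing survives a restart.
    Optional<Task> stored = storage.getTask(task.getId());
    Optional<TaskStatus> status = storage.getStatus(task.getId());
}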

Example 2 with HeapMemoryTaskStorage

Use of io.druid.indexing.overlord.HeapMemoryTaskStorage in project druid by druid-io.

From class RealtimeIndexTaskTest, method testRestoreAfterHandoffAttemptDuringShutdown.

@Test(timeout = 60_000L)
public void testRestoreAfterHandoffAttemptDuringShutdown() throws Exception {
    final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
    final File directory = tempFolder.newFolder();
    final RealtimeIndexTask task1 = makeRealtimeTask(null);
    final DataSegment publishedSegment;
    // First run:
    {
        final TaskToolbox taskToolbox = makeToolbox(task1, taskStorage, mdc, directory);
        final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
        // Wait for the firehose to show up; it starts off null.
        while (task1.getFirehose() == null) {
            Thread.sleep(50);
        }
        final TestFirehose firehose = (TestFirehose) task1.getFirehose();
        firehose.addRows(ImmutableList.<InputRow>of(new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))));
        // Stop the firehose, this will trigger a finishJob.
        firehose.close();
        // Wait for publish.
        while (mdc.getPublished().isEmpty()) {
            Thread.sleep(50);
        }
        publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
        // Do a query.
        Assert.assertEquals(1, sumMetric(task1, "rows"));
        // Trigger graceful shutdown.
        task1.stopGracefully();
        // Wait for the task to finish. The status doesn't really matter.
        while (!statusFuture.isDone()) {
            Thread.sleep(50);
        }
    }
    // Second run:
    {
        final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
        final TaskToolbox taskToolbox = makeToolbox(task2, taskStorage, mdc, directory);
        final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
        // Wait for the firehose to show up; it starts off null.
        while (task2.getFirehose() == null) {
            Thread.sleep(50);
        }
        // Stop the firehose again; this triggers finishJob and starts another handoff.
        final TestFirehose firehose = (TestFirehose) task2.getFirehose();
        firehose.close();
        // publishedSegment is still published. No reason it shouldn't be.
        Assert.assertEquals(ImmutableSet.of(publishedSegment), mdc.getPublished());
        // Wait for a handoffCallback to show up.
        while (handOffCallbacks.isEmpty()) {
            Thread.sleep(50);
        }
        // Simulate handoff.
        for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
            final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
            Assert.assertEquals(new SegmentDescriptor(publishedSegment.getInterval(), publishedSegment.getVersion(), publishedSegment.getShardSpec().getPartitionNum()), entry.getKey());
            executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
        }
        handOffCallbacks.clear();
        // Wait for the task to finish.
        final TaskStatus taskStatus = statusFuture.get();
        Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
    }
}
Also used: TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig), HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage), TaskStatus (io.druid.indexing.common.TaskStatus), DataSegment (io.druid.timeline.DataSegment), TaskToolbox (io.druid.indexing.common.TaskToolbox), Executor (java.util.concurrent.Executor), TaskStorage (io.druid.indexing.overlord.TaskStorage), TestIndexerMetadataStorageCoordinator (io.druid.indexing.test.TestIndexerMetadataStorageCoordinator), SegmentDescriptor (io.druid.query.SegmentDescriptor), MapBasedInputRow (io.druid.data.input.MapBasedInputRow), InputRow (io.druid.data.input.InputRow), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), File (java.io.File), Pair (io.druid.java.util.common.Pair), Test (org.junit.Test)
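One idiom in the second run is worth isolating: a registered handoff callback is stored as a Pair of an Executor and a Runnable (the lhs and rhs fields of io.druid's Pair, as used above), so "simulating handoff" just means running each callback on its executor. The same loop, condensed and without the key assertion:

// Condensed restatement of the handoff simulation above.
for (Pair<Executor, Runnable> callback : handOffCallbacks.values()) {
    callback.lhs.execute(callback.rhs); // lhs = executor, rhs = handoff callback
}
handOffCallbacks.clear();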

Example 3 with HeapMemoryTaskStorage

Use of io.druid.indexing.overlord.HeapMemoryTaskStorage in project druid by druid-io.

From class TaskActionTestKit, method before.

@Override
public void before() {
    taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(new Period("PT24H")));
    taskLockbox = new TaskLockbox(taskStorage);
    testDerbyConnector = new TestDerbyConnector(Suppliers.ofInstance(new MetadataStorageConnectorConfig()), Suppliers.ofInstance(metadataStorageTablesConfig));
    metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator(new TestUtils().getTestObjectMapper(), metadataStorageTablesConfig, testDerbyConnector);
    taskActionToolbox = new TaskActionToolbox(taskLockbox, metadataStorageCoordinator, new NoopServiceEmitter(), EasyMock.createMock(SupervisorManager.class));
    testDerbyConnector.createDataSourceTable();
    testDerbyConnector.createPendingSegmentsTable();
    testDerbyConnector.createSegmentTable();
    testDerbyConnector.createRulesTable();
    testDerbyConnector.createConfigTable();
    testDerbyConnector.createTaskTables();
    testDerbyConnector.createAuditTable();
}
Also used: IndexerSQLMetadataStorageCoordinator (io.druid.metadata.IndexerSQLMetadataStorageCoordinator), TestUtils (io.druid.indexing.common.TestUtils), MetadataStorageConnectorConfig (io.druid.metadata.MetadataStorageConnectorConfig), TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig), HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage), Period (org.joda.time.Period), TaskLockbox (io.druid.indexing.overlord.TaskLockbox), NoopServiceEmitter (io.druid.server.metrics.NoopServiceEmitter), TestDerbyConnector (io.druid.metadata.TestDerbyConnector)
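Unlike the other examples, this one passes an explicit Period to TaskStorageConfig. A hedged sketch of the two variants; reading the period as the retention threshold for recently finished task statuses is an assumption based on how TaskStorageConfig is used in these tests:

// null falls back to the config's default threshold; an explicit period bounds
// how long completed tasks stay visible as "recently finished" (assumption, see above).
TaskStorage withDefaults = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
TaskStorage oneDay = new HeapMemoryTaskStorage(new TaskStorageConfig(new Period("PT24H")));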

Example 4 with HeapMemoryTaskStorage

Use of io.druid.indexing.overlord.HeapMemoryTaskStorage in project druid by druid-io.

From class IngestSegmentFirehoseFactoryTest, method constructorFeeder.

@Parameterized.Parameters(name = "{1}")
public static Collection<Object[]> constructorFeeder() throws IOException {
    final IndexSpec indexSpec = new IndexSpec();
    // Anonymous TaskStorageConfig subclass with no overrides; effectively the same as new TaskStorageConfig(null).
    final HeapMemoryTaskStorage ts = new HeapMemoryTaskStorage(new TaskStorageConfig(null) {
    });
    final IncrementalIndexSchema schema = new IncrementalIndexSchema.Builder()
        .withQueryGranularity(Granularities.NONE)
        .withMinTimestamp(JodaUtils.MIN_INSTANT)
        .withDimensionsSpec(ROW_PARSER)
        .withMetrics(new AggregatorFactory[] {
            new LongSumAggregatorFactory(METRIC_LONG_NAME, DIM_LONG_NAME),
            new DoubleSumAggregatorFactory(METRIC_FLOAT_NAME, DIM_FLOAT_NAME)
        })
        .build();
    final OnheapIncrementalIndex index = new OnheapIncrementalIndex(schema, true, MAX_ROWS * MAX_SHARD_NUMBER);
    for (int i = 0; i < MAX_ROWS; ++i) {
        index.add(ROW_PARSER.parse(buildRow((long) i)));
    }
    if (!persistDir.mkdirs() && !persistDir.exists()) {
        throw new IOException(String.format("Could not create directory at [%s]", persistDir.getAbsolutePath()));
    }
    INDEX_MERGER.persist(index, persistDir, indexSpec);
    final TaskLockbox tl = new TaskLockbox(ts);
    final IndexerSQLMetadataStorageCoordinator mdc = new IndexerSQLMetadataStorageCoordinator(null, null, null) {

        private final Set<DataSegment> published = Sets.newHashSet();

        private final Set<DataSegment> nuked = Sets.newHashSet();

        @Override
        public List<DataSegment> getUsedSegmentsForInterval(String dataSource, Interval interval) throws IOException {
            return ImmutableList.copyOf(segmentSet);
        }

        @Override
        public List<DataSegment> getUsedSegmentsForIntervals(String dataSource, List<Interval> interval) throws IOException {
            return ImmutableList.copyOf(segmentSet);
        }

        @Override
        public List<DataSegment> getUnusedSegmentsForInterval(String dataSource, Interval interval) {
            return ImmutableList.of();
        }

        @Override
        public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) {
            Set<DataSegment> added = Sets.newHashSet();
            for (final DataSegment segment : segments) {
                if (published.add(segment)) {
                    added.add(segment);
                }
            }
            return ImmutableSet.copyOf(added);
        }

        @Override
        public void deleteSegments(Set<DataSegment> segments) {
            nuked.addAll(segments);
        }
    };
    final LocalTaskActionClientFactory tac = new LocalTaskActionClientFactory(ts, new TaskActionToolbox(tl, mdc, newMockEmitter(), EasyMock.createMock(SupervisorManager.class)));
    SegmentHandoffNotifierFactory notifierFactory = EasyMock.createNiceMock(SegmentHandoffNotifierFactory.class);
    EasyMock.replay(notifierFactory);
    final TaskToolboxFactory taskToolboxFactory = new TaskToolboxFactory(
        new TaskConfig(tmpDir.getAbsolutePath(), null, null, 50000, null, false, null, null),
        tac,
        newMockEmitter(),
        new DataSegmentPusher() {

        @Deprecated
        @Override
        public String getPathForHadoop(String dataSource) {
            return getPathForHadoop();
        }

        @Override
        public String getPathForHadoop() {
            throw new UnsupportedOperationException();
        }

        @Override
        public DataSegment push(File file, DataSegment segment) throws IOException {
            return segment;
        }
        },
        new DataSegmentKiller() {

        @Override
        public void kill(DataSegment segments) throws SegmentLoadingException {
        }

        @Override
        public void killAll() throws IOException {
            throw new UnsupportedOperationException("not implemented");
        }
        },
        new DataSegmentMover() {

        @Override
        public DataSegment move(DataSegment dataSegment, Map<String, Object> targetLoadSpec) throws SegmentLoadingException {
            return dataSegment;
        }
        },
        new DataSegmentArchiver() {

        @Override
        public DataSegment archive(DataSegment segment) throws SegmentLoadingException {
            return segment;
        }

        @Override
        public DataSegment restore(DataSegment segment) throws SegmentLoadingException {
            return segment;
        }
        },
        null, // segment announcer
        notifierFactory,
        null, // query runner factory conglomerate corporation unionized collective
        null, // query executor service
        null, // monitor scheduler
        new SegmentLoaderFactory(new SegmentLoaderLocalCacheManager(null, new SegmentLoaderConfig() {

        @Override
        public List<StorageLocationConfig> getLocations() {
            return Lists.newArrayList();
        }
        }, MAPPER)),
        MAPPER,
        INDEX_MERGER,
        INDEX_IO,
        null,
        null,
        INDEX_MERGER_V9);
    Collection<Object[]> values = new LinkedList<>();
    for (InputRowParser parser : Arrays.<InputRowParser>asList(
        ROW_PARSER,
        new MapInputRowParser(new JSONParseSpec(
            new TimestampSpec(TIME_COLUMN, "auto", null),
            new DimensionsSpec(
                DimensionsSpec.getDefaultSchemas(ImmutableList.<String>of()),
                ImmutableList.of(DIM_FLOAT_NAME, DIM_LONG_NAME),
                ImmutableList.<SpatialDimensionSchema>of()
            ),
            null,
            null
        ))
    )) {
        for (List<String> dim_names : Arrays.<List<String>>asList(null, ImmutableList.of(DIM_NAME))) {
            for (List<String> metric_names : Arrays.<List<String>>asList(null, ImmutableList.of(METRIC_LONG_NAME, METRIC_FLOAT_NAME))) {
                values.add(new Object[] {
                    new IngestSegmentFirehoseFactory(
                        DATA_SOURCE_NAME,
                        FOREVER,
                        new SelectorDimFilter(DIM_NAME, DIM_VALUE, null),
                        dim_names,
                        metric_names,
                        Guice.createInjector(new Module() {
                            @Override
                            public void configure(Binder binder) {
                                binder.bind(TaskToolboxFactory.class).toInstance(taskToolboxFactory);
                            }
                        }),
                        INDEX_IO
                    ),
                    String.format(
                        "DimNames[%s]MetricNames[%s]ParserDimNames[%s]",
                        dim_names == null ? "null" : "dims",
                        metric_names == null ? "null" : "metrics",
                        parser == ROW_PARSER ? "dims" : "null"
                    ),
                    parser
                });
            }
        }
    }
    return values;
}
Also used: DataSegmentArchiver (io.druid.segment.loading.DataSegmentArchiver), OnheapIncrementalIndex (io.druid.segment.incremental.OnheapIncrementalIndex), DataSegmentMover (io.druid.segment.loading.DataSegmentMover), List (java.util.List), ArrayList (java.util.ArrayList), ImmutableList (com.google.common.collect.ImmutableList), LinkedList (java.util.LinkedList), LocalTaskActionClientFactory (io.druid.indexing.common.actions.LocalTaskActionClientFactory), SegmentLoaderConfig (io.druid.segment.loading.SegmentLoaderConfig), SegmentLoaderFactory (io.druid.indexing.common.SegmentLoaderFactory), SegmentLoaderLocalCacheManager (io.druid.segment.loading.SegmentLoaderLocalCacheManager), JSONParseSpec (io.druid.data.input.impl.JSONParseSpec), IndexerSQLMetadataStorageCoordinator (io.druid.metadata.IndexerSQLMetadataStorageCoordinator), DoubleSumAggregatorFactory (io.druid.query.aggregation.DoubleSumAggregatorFactory), HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage), MapInputRowParser (io.druid.data.input.impl.MapInputRowParser), InputRowParser (io.druid.data.input.impl.InputRowParser), Module (com.google.inject.Module), SimpleModule (com.fasterxml.jackson.databind.module.SimpleModule), File (java.io.File), Interval (org.joda.time.Interval), IndexSpec (io.druid.segment.IndexSpec), DataSegmentPusher (io.druid.segment.loading.DataSegmentPusher), ImmutableSet (com.google.common.collect.ImmutableSet), Set (java.util.Set), LongSumAggregatorFactory (io.druid.query.aggregation.LongSumAggregatorFactory), DataSegmentKiller (io.druid.segment.loading.DataSegmentKiller), TaskConfig (io.druid.indexing.common.config.TaskConfig), DataSegment (io.druid.timeline.DataSegment), Binder (com.google.inject.Binder), TaskToolboxFactory (io.druid.indexing.common.TaskToolboxFactory), SelectorDimFilter (io.druid.query.filter.SelectorDimFilter), TaskActionToolbox (io.druid.indexing.common.actions.TaskActionToolbox), TimestampSpec (io.druid.data.input.impl.TimestampSpec), IncrementalIndexSchema (io.druid.segment.incremental.IncrementalIndexSchema), SegmentLoadingException (io.druid.segment.loading.SegmentLoadingException), TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig), IOException (java.io.IOException), AggregatorFactory (io.druid.query.aggregation.AggregatorFactory), SegmentHandoffNotifierFactory (io.druid.segment.realtime.plumber.SegmentHandoffNotifierFactory), SpatialDimensionSchema (io.druid.data.input.impl.SpatialDimensionSchema), TaskLockbox (io.druid.indexing.overlord.TaskLockbox), DimensionsSpec (io.druid.data.input.impl.DimensionsSpec)
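Two details of this setup are easy to miss. First, @Parameterized.Parameters(name = "{1}") uses the second element of each Object[] (the String.format result) as the display name of each test case. Second, the TaskToolboxFactory reaches the IngestSegmentFirehoseFactory through a throwaway Guice injector; the binding, restated on its own (Injector is com.google.inject.Injector):

// Minimal injector exposing only the TaskToolboxFactory the firehose factory needs.
Injector injector = Guice.createInjector(new Module() {
    @Override
    public void configure(Binder binder) {
        binder.bind(TaskToolboxFactory.class).toInstance(taskToolboxFactory);
    }
});
TaskToolboxFactory bound = injector.getInstance(TaskToolboxFactory.class); // same instance that was bound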

Aggregations

TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig): 4
HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage): 4
TaskLockbox (io.druid.indexing.overlord.TaskLockbox): 3
IndexerSQLMetadataStorageCoordinator (io.druid.metadata.IndexerSQLMetadataStorageCoordinator): 2
DataSegment (io.druid.timeline.DataSegment): 2
File (java.io.File): 2
SimpleModule (com.fasterxml.jackson.databind.module.SimpleModule): 1
ImmutableList (com.google.common.collect.ImmutableList): 1
ImmutableSet (com.google.common.collect.ImmutableSet): 1
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 1
Binder (com.google.inject.Binder): 1
Module (com.google.inject.Module): 1
ServiceEmitter (com.metamx.emitter.service.ServiceEmitter): 1
NoopServiceAnnouncer (io.druid.curator.discovery.NoopServiceAnnouncer): 1
InputRow (io.druid.data.input.InputRow): 1
MapBasedInputRow (io.druid.data.input.MapBasedInputRow): 1
DimensionsSpec (io.druid.data.input.impl.DimensionsSpec): 1
InputRowParser (io.druid.data.input.impl.InputRowParser): 1
JSONParseSpec (io.druid.data.input.impl.JSONParseSpec): 1
MapInputRowParser (io.druid.data.input.impl.MapInputRowParser): 1