
Example 1 with TaskStorage

Uses io.druid.indexing.overlord.TaskStorage in project druid by druid-io.

From the class KafkaSupervisorTest, method setUp:

@Before
public void setUp() throws Exception {
    // Mock out the overlord-side collaborators; only the ZooKeeper/Kafka pieces below are real.
    taskStorage = createMock(TaskStorage.class);
    taskMaster = createMock(TaskMaster.class);
    taskRunner = createMock(TaskRunner.class);
    indexerMetadataStorageCoordinator = createMock(IndexerMetadataStorageCoordinator.class);
    taskClient = createMock(KafkaIndexTaskClient.class);
    taskQueue = createMock(TaskQueue.class);

    // Start an embedded ZooKeeper test cluster and a test Kafka broker backed by it.
    zkServer = new TestingCluster(1);
    zkServer.start();
    kafkaServer = new TestBroker(zkServer.getConnectString(), tempFolder.newFolder(), 1,
        ImmutableMap.of("num.partitions", String.valueOf(NUM_PARTITIONS)));
    kafkaServer.start();
    kafkaHost = String.format("localhost:%d", kafkaServer.getPort());

    // Data schema and tuning config for the supervisor under test.
    dataSchema = getDataSchema(DATASOURCE);
    tuningConfig = new KafkaSupervisorTuningConfig(
        1000, 50000, new Period("P1Y"), new File("/test"),
        null, null, true, false, null, null, numThreads,
        TEST_CHAT_THREADS, TEST_CHAT_RETRIES, TEST_HTTP_TIMEOUT, TEST_SHUTDOWN_TIMEOUT);
}
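The setUp above only creates the mocks; each test body then records expectations on them before exercising the supervisor. The snippet below is a minimal, hypothetical sketch of the usual EasyMock expect/replay/verify flow. The specific accessors (TaskMaster.getTaskQueue(), TaskMaster.getTaskRunner(), TaskStorage.getActiveTasks()) and the Guava Optional return types are assumptions about the Druid APIs of that era, not taken from this example.

// Hypothetical test-body sketch (not from the source). Assumes static imports of
// org.easymock.EasyMock.expect/replay/verify, Guava's Optional and ImmutableList,
// and io.druid.indexing.common.task.Task.
expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();
expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
replay(taskStorage, taskMaster, taskRunner, taskQueue);

// ... create the KafkaSupervisor under test and trigger a run here ...

verify(taskStorage, taskMaster, taskRunner, taskQueue);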
Also used: IndexerMetadataStorageCoordinator (io.druid.indexing.overlord.IndexerMetadataStorageCoordinator), TestingCluster (org.apache.curator.test.TestingCluster), TaskStorage (io.druid.indexing.overlord.TaskStorage), TestBroker (io.druid.indexing.kafka.test.TestBroker), KafkaIndexTaskClient (io.druid.indexing.kafka.KafkaIndexTaskClient), TaskQueue (io.druid.indexing.overlord.TaskQueue), Period (org.joda.time.Period), TaskMaster (io.druid.indexing.overlord.TaskMaster), File (java.io.File), TaskRunner (io.druid.indexing.overlord.TaskRunner), Before (org.junit.Before)
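Because setUp starts a real ZooKeeper test cluster and an embedded Kafka broker, the test class also needs a matching @After method to release those resources. The following is a minimal sketch, assuming TestBroker and TestingCluster expose close()/stop() as in the Druid and Curator test utilities:

@After
public void tearDown() throws Exception {
    // Shut down the embedded Kafka broker first, then the ZooKeeper test cluster it depends on.
    kafkaServer.close();
    zkServer.stop();
}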

Example 2 with TaskStorage

Uses io.druid.indexing.overlord.TaskStorage in project druid by druid-io.

From the class RealtimeIndexTaskTest, method testRestoreAfterHandoffAttemptDuringShutdown:

@Test(timeout = 60_000L)
public void testRestoreAfterHandoffAttemptDuringShutdown() throws Exception {
    final TaskStorage taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
    final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
    final File directory = tempFolder.newFolder();
    final RealtimeIndexTask task1 = makeRealtimeTask(null);
    final DataSegment publishedSegment;
    // First run:
    {
        final TaskToolbox taskToolbox = makeToolbox(task1, taskStorage, mdc, directory);
        final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
        // Wait for the firehose to show up; it starts off null.
        while (task1.getFirehose() == null) {
            Thread.sleep(50);
        }
        final TestFirehose firehose = (TestFirehose) task1.getFirehose();
        firehose.addRows(ImmutableList.<InputRow>of(new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))));
        // Stop the firehose; this will trigger a finishJob.
        firehose.close();
        // Wait for publish.
        while (mdc.getPublished().isEmpty()) {
            Thread.sleep(50);
        }
        publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
        // Do a query.
        Assert.assertEquals(1, sumMetric(task1, "rows"));
        // Trigger graceful shutdown.
        task1.stopGracefully();
        // Wait for the task to finish. The status doesn't really matter.
        while (!statusFuture.isDone()) {
            Thread.sleep(50);
        }
    }
    // Second run:
    {
        final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
        final TaskToolbox taskToolbox = makeToolbox(task2, taskStorage, mdc, directory);
        final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
        // Wait for the firehose to show up; it starts off null.
        while (task2.getFirehose() == null) {
            Thread.sleep(50);
        }
        // Stop the firehose again; this will trigger a finishJob and start another handoff.
        final TestFirehose firehose = (TestFirehose) task2.getFirehose();
        firehose.close();
        // publishedSegment is still published. No reason it shouldn't be.
        Assert.assertEquals(ImmutableSet.of(publishedSegment), mdc.getPublished());
        // Wait for a handoffCallback to show up.
        while (handOffCallbacks.isEmpty()) {
            Thread.sleep(50);
        }
        // Simulate handoff.
        for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
            final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
            Assert.assertEquals(new SegmentDescriptor(publishedSegment.getInterval(), publishedSegment.getVersion(), publishedSegment.getShardSpec().getPartitionNum()), entry.getKey());
            executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
        }
        handOffCallbacks.clear();
        // Wait for the task to finish.
        final TaskStatus taskStatus = statusFuture.get();
        Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
    }
}
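The handoff loop near the end only works because the toolbox built by makeToolbox registers segment handoff callbacks into the test's handOffCallbacks map instead of waiting on a real coordinator. The stub below is a hypothetical sketch of that wiring; the exact SegmentHandoffNotifier method signatures, the Pair constructor, and the concurrent map are assumptions inferred from how the test reads, not shown in this excerpt.

// Hypothetical handoff-notifier stub (not from the source): rather than performing
// real segment handoff, it parks each (executor, runnable) callback in a map that
// the test drains and executes to simulate handoff completing.
final Map<SegmentDescriptor, Pair<Executor, Runnable>> handOffCallbacks = Maps.newConcurrentMap();

final SegmentHandoffNotifier notifier = new SegmentHandoffNotifier() {
    @Override
    public boolean registerSegmentHandoffCallback(SegmentDescriptor descriptor, Executor exec, Runnable handOffRunnable) {
        // Record the callback so the test can trigger it later.
        handOffCallbacks.put(descriptor, new Pair<>(exec, handOffRunnable));
        return true;
    }

    @Override
    public void start() {
        // no-op for tests
    }

    @Override
    public void close() {
        // no-op for tests
    }
};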
Also used: TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig), HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage), TaskStatus (io.druid.indexing.common.TaskStatus), DataSegment (io.druid.timeline.DataSegment), TaskToolbox (io.druid.indexing.common.TaskToolbox), Executor (java.util.concurrent.Executor), TaskStorage (io.druid.indexing.overlord.TaskStorage), TestIndexerMetadataStorageCoordinator (io.druid.indexing.test.TestIndexerMetadataStorageCoordinator), SegmentDescriptor (io.druid.query.SegmentDescriptor), MapBasedInputRow (io.druid.data.input.MapBasedInputRow), InputRow (io.druid.data.input.InputRow), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), File (java.io.File), Pair (io.druid.java.util.common.Pair), Test (org.junit.Test)

Aggregations

TaskStorage (io.druid.indexing.overlord.TaskStorage): 2
File (java.io.File): 2
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 1
InputRow (io.druid.data.input.InputRow): 1
MapBasedInputRow (io.druid.data.input.MapBasedInputRow): 1
TaskStatus (io.druid.indexing.common.TaskStatus): 1
TaskToolbox (io.druid.indexing.common.TaskToolbox): 1
TaskStorageConfig (io.druid.indexing.common.config.TaskStorageConfig): 1
KafkaIndexTaskClient (io.druid.indexing.kafka.KafkaIndexTaskClient): 1
TestBroker (io.druid.indexing.kafka.test.TestBroker): 1
HeapMemoryTaskStorage (io.druid.indexing.overlord.HeapMemoryTaskStorage): 1
IndexerMetadataStorageCoordinator (io.druid.indexing.overlord.IndexerMetadataStorageCoordinator): 1
TaskMaster (io.druid.indexing.overlord.TaskMaster): 1
TaskQueue (io.druid.indexing.overlord.TaskQueue): 1
TaskRunner (io.druid.indexing.overlord.TaskRunner): 1
TestIndexerMetadataStorageCoordinator (io.druid.indexing.test.TestIndexerMetadataStorageCoordinator): 1
Pair (io.druid.java.util.common.Pair): 1
SegmentDescriptor (io.druid.query.SegmentDescriptor): 1
DataSegment (io.druid.timeline.DataSegment): 1
Executor (java.util.concurrent.Executor): 1