
Example 1 with TaskRunner

Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io.

From the class KafkaSupervisorTest, method testLatestOffset.

/**
 * Test generating the starting offsets from the partition high water marks in Kafka.
 */
@Test
public void testLatestOffset() throws Exception {
    supervisor = getSupervisor(1, 1, false, "PT1H", null);
    addSomeEvents(1100);
    Capture<KafkaIndexTask> captured = Capture.newInstance();
    expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
    expect(taskMaster.getTaskRunner()).andReturn(Optional.<TaskRunner>absent()).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(null)).anyTimes();
    expect(taskQueue.add(capture(captured))).andReturn(true);
    replayAll();
    supervisor.start();
    supervisor.runInternal();
    verifyAll();
    KafkaIndexTask task = captured.getValue();
    Assert.assertEquals(1100L, (long) task.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(1100L, (long) task.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(1100L, (long) task.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(2));
}
Also used : RealtimeIndexTask(io.druid.indexing.common.task.RealtimeIndexTask) Task(io.druid.indexing.common.task.Task) KafkaIndexTask(io.druid.indexing.kafka.KafkaIndexTask) KafkaDataSourceMetadata(io.druid.indexing.kafka.KafkaDataSourceMetadata) TaskRunner(io.druid.indexing.overlord.TaskRunner) Test(org.junit.Test)
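
getSupervisor(1, 1, false, "PT1H", null) appears to create the supervisor with useEarliestOffset = false, so the expected start offsets are simply the per-partition high water marks produced by addSomeEvents(1100). The fragment below is a minimal sketch of how such latest offsets can be read through the Kafka consumer API; it assumes a Kafka client of 0.10.1 or newer (for endOffsets), and the helper name fetchLatestOffsets is hypothetical rather than part of the supervisor.

// Minimal sketch (not the supervisor's actual code): read the high water mark of every
// partition of a topic. Assumes a Kafka client >= 0.10.1, where endOffsets() is available.
private Map<Integer, Long> fetchLatestOffsets(KafkaConsumer<byte[], byte[]> consumer, String topic) {
    List<TopicPartition> partitions = new ArrayList<>();
    for (PartitionInfo info : consumer.partitionsFor(topic)) {
        partitions.add(new TopicPartition(topic, info.partition()));
    }
    // endOffsets() returns, per partition, the offset the next produced record will get,
    // i.e. the high water mark the assertions above expect after addSomeEvents(1100).
    Map<TopicPartition, Long> endOffsets = consumer.endOffsets(partitions);
    Map<Integer, Long> latest = new HashMap<>();
    for (Map.Entry<TopicPartition, Long> entry : endOffsets.entrySet()) {
        latest.put(entry.getKey().partition(), entry.getValue());
    }
    return latest;
}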

Example 2 with TaskRunner

Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io.

From the class KafkaSupervisorTest, method testDatasourceMetadata.

/**
 * Test generating the starting offsets from the partition data stored in druid_dataSource which contains the
 * offsets of the last built segments.
 */
@Test
public void testDatasourceMetadata() throws Exception {
    supervisor = getSupervisor(1, 1, true, "PT1H", null);
    addSomeEvents(100);
    Capture<KafkaIndexTask> captured = Capture.newInstance();
    expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
    expect(taskMaster.getTaskRunner()).andReturn(Optional.<TaskRunner>absent()).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(new KafkaPartitions(KAFKA_TOPIC, ImmutableMap.of(0, 10L, 1, 20L, 2, 30L)))).anyTimes();
    expect(taskQueue.add(capture(captured))).andReturn(true);
    replayAll();
    supervisor.start();
    supervisor.runInternal();
    verifyAll();
    KafkaIndexTask task = captured.getValue();
    KafkaIOConfig taskConfig = task.getIOConfig();
    Assert.assertEquals("sequenceName-0", taskConfig.getBaseSequenceName());
    Assert.assertEquals(10L, (long) taskConfig.getStartPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(20L, (long) taskConfig.getStartPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(30L, (long) taskConfig.getStartPartitions().getPartitionOffsetMap().get(2));
}
Also used : RealtimeIndexTask(io.druid.indexing.common.task.RealtimeIndexTask) Task(io.druid.indexing.common.task.Task) KafkaIndexTask(io.druid.indexing.kafka.KafkaIndexTask) KafkaPartitions(io.druid.indexing.kafka.KafkaPartitions) KafkaIOConfig(io.druid.indexing.kafka.KafkaIOConfig) KafkaDataSourceMetadata(io.druid.indexing.kafka.KafkaDataSourceMetadata) TaskRunner(io.druid.indexing.overlord.TaskRunner) Test(org.junit.Test)
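
Here the stored KafkaDataSourceMetadata (offsets 10/20/30) wins over whatever is in Kafka, which is why the captured task starts from those values rather than from the high water marks left by addSomeEvents(100). A minimal sketch of that precedence, with a hypothetical helper name and no claim to match the supervisor's real code:

// Minimal sketch of the precedence this test exercises: an offset recorded in the
// druid_dataSource metadata takes priority; Kafka is only consulted when nothing is stored.
// Hypothetical helper, not the supervisor's actual implementation.
private long resolveStartOffset(int partition, Map<Integer, Long> storedOffsets, Map<Integer, Long> kafkaOffsets) {
    Long stored = storedOffsets.get(partition);
    if (stored != null) {
        return stored;                     // resume from the last published segment's offset
    }
    return kafkaOffsets.get(partition);    // otherwise fall back to the offset read from Kafka
}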

Example 3 with TaskRunner

Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io.

From the class KafkaSupervisorTest, method testMultiTask.

@Test
public void testMultiTask() throws Exception {
    supervisor = getSupervisor(1, 2, true, "PT1H", null);
    addSomeEvents(1);
    Capture<KafkaIndexTask> captured = Capture.newInstance(CaptureType.ALL);
    expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
    expect(taskMaster.getTaskRunner()).andReturn(Optional.<TaskRunner>absent()).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(null)).anyTimes();
    expect(taskQueue.add(capture(captured))).andReturn(true).times(2);
    replayAll();
    supervisor.start();
    supervisor.runInternal();
    verifyAll();
    KafkaIndexTask task1 = captured.getValues().get(0);
    Assert.assertEquals(2, task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().size());
    Assert.assertEquals(2, task1.getIOConfig().getEndPartitions().getPartitionOffsetMap().size());
    Assert.assertEquals(0L, (long) task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(Long.MAX_VALUE, (long) task1.getIOConfig().getEndPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(0L, (long) task1.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(2));
    Assert.assertEquals(Long.MAX_VALUE, (long) task1.getIOConfig().getEndPartitions().getPartitionOffsetMap().get(2));
    KafkaIndexTask task2 = captured.getValues().get(1);
    Assert.assertEquals(1, task2.getIOConfig().getStartPartitions().getPartitionOffsetMap().size());
    Assert.assertEquals(1, task2.getIOConfig().getEndPartitions().getPartitionOffsetMap().size());
    Assert.assertEquals(0L, (long) task2.getIOConfig().getStartPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(Long.MAX_VALUE, (long) task2.getIOConfig().getEndPartitions().getPartitionOffsetMap().get(1));
}
Also used : RealtimeIndexTask(io.druid.indexing.common.task.RealtimeIndexTask) Task(io.druid.indexing.common.task.Task) KafkaIndexTask(io.druid.indexing.kafka.KafkaIndexTask) KafkaDataSourceMetadata(io.druid.indexing.kafka.KafkaDataSourceMetadata) TaskRunner(io.druid.indexing.overlord.TaskRunner) Test(org.junit.Test)
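
With taskCount = 2 and three partitions, the assertions imply that one task owns partitions 0 and 2 and the other owns partition 1, which is what grouping by partition % taskCount would produce. A minimal sketch of that grouping, using a hypothetical helper name purely to explain the expected values:

// Minimal sketch (hypothetical helper): group partitions into task groups by
// partition % taskCount, which reproduces the split asserted above.
private Map<Integer, List<Integer>> groupPartitions(int numPartitions, int taskCount) {
    Map<Integer, List<Integer>> groups = new HashMap<>();
    for (int partition = 0; partition < numPartitions; partition++) {
        groups.computeIfAbsent(partition % taskCount, k -> new ArrayList<>()).add(partition);
    }
    return groups;   // for numPartitions = 3, taskCount = 2: {0=[0, 2], 1=[1]}
}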

Example 4 with TaskRunner

Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io.

From the class KafkaSupervisorTest, method setUp.

@Before
public void setUp() throws Exception {
    taskStorage = createMock(TaskStorage.class);
    taskMaster = createMock(TaskMaster.class);
    taskRunner = createMock(TaskRunner.class);
    indexerMetadataStorageCoordinator = createMock(IndexerMetadataStorageCoordinator.class);
    taskClient = createMock(KafkaIndexTaskClient.class);
    taskQueue = createMock(TaskQueue.class);
    zkServer = new TestingCluster(1);
    zkServer.start();
    kafkaServer = new TestBroker(zkServer.getConnectString(), tempFolder.newFolder(), 1, ImmutableMap.of("num.partitions", String.valueOf(NUM_PARTITIONS)));
    kafkaServer.start();
    kafkaHost = String.format("localhost:%d", kafkaServer.getPort());
    dataSchema = getDataSchema(DATASOURCE);
    tuningConfig = new KafkaSupervisorTuningConfig(1000, 50000, new Period("P1Y"), new File("/test"), null, null, true, false, null, null, numThreads, TEST_CHAT_THREADS, TEST_CHAT_RETRIES, TEST_HTTP_TIMEOUT, TEST_SHUTDOWN_TIMEOUT);
}
Also used : IndexerMetadataStorageCoordinator(io.druid.indexing.overlord.IndexerMetadataStorageCoordinator) TestingCluster(org.apache.curator.test.TestingCluster) TaskStorage(io.druid.indexing.overlord.TaskStorage) TestBroker(io.druid.indexing.kafka.test.TestBroker) KafkaIndexTaskClient(io.druid.indexing.kafka.KafkaIndexTaskClient) TaskQueue(io.druid.indexing.overlord.TaskQueue) Period(org.joda.time.Period) TaskMaster(io.druid.indexing.overlord.TaskMaster) File(java.io.File) TaskRunner(io.druid.indexing.overlord.TaskRunner) Before(org.junit.Before)
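
setUp starts a real embedded ZooKeeper (TestingCluster) and Kafka broker (TestBroker) alongside the EasyMock mocks, so the test class also needs a matching tearDown to release them. A minimal sketch of what that could look like, assuming TestBroker is Closeable; the real test's tearDown may differ.

@After
public void tearDown() throws Exception {
    // release the embedded Kafka broker and ZooKeeper cluster started in setUp
    kafkaServer.close();
    zkServer.stop();
}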

Example 5 with TaskRunner

Use of io.druid.indexing.overlord.TaskRunner in project druid by druid-io.

From the class KafkaSupervisorTest, method testBadMetadataOffsets.

@Test(expected = ISE.class)
public void testBadMetadataOffsets() throws Exception {
    supervisor = getSupervisor(1, 1, true, "PT1H", null);
    addSomeEvents(1);
    expect(taskMaster.getTaskRunner()).andReturn(Optional.<TaskRunner>absent()).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(new KafkaPartitions(KAFKA_TOPIC, ImmutableMap.of(0, 10L, 1, 20L, 2, 30L)))).anyTimes();
    replayAll();
    supervisor.start();
    supervisor.runInternal();
}
Also used : RealtimeIndexTask(io.druid.indexing.common.task.RealtimeIndexTask) Task(io.druid.indexing.common.task.Task) KafkaIndexTask(io.druid.indexing.kafka.KafkaIndexTask) KafkaPartitions(io.druid.indexing.kafka.KafkaPartitions) KafkaDataSourceMetadata(io.druid.indexing.kafka.KafkaDataSourceMetadata) TaskRunner(io.druid.indexing.overlord.TaskRunner) Test(org.junit.Test)
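
Here the metadata store claims offsets 10/20/30 while addSomeEvents(1) leaves the Kafka partitions far short of that, so the supervisor cannot seek to the stored positions and the test expects an ISE (Druid's format-aware IllegalStateException). A minimal sketch of the kind of sanity check being exercised, with a hypothetical helper name:

// Minimal sketch (hypothetical helper, not the supervisor's actual code): a stored offset
// beyond the partition's current high water mark cannot be sought to, so fail fast.
private void validateStoredOffset(int partition, long storedOffset, long kafkaHighWaterMark) {
    if (storedOffset > kafkaHighWaterMark) {
        throw new ISE(
            "Offset in metadata storage [%,d] > latest Kafka offset [%,d] for partition [%d]",
            storedOffset, kafkaHighWaterMark, partition
        );
    }
}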

Aggregations

TaskRunner (io.druid.indexing.overlord.TaskRunner) 10
Task (io.druid.indexing.common.task.Task) 7
RealtimeIndexTask (io.druid.indexing.common.task.RealtimeIndexTask) 6
KafkaDataSourceMetadata (io.druid.indexing.kafka.KafkaDataSourceMetadata) 6
KafkaIndexTask (io.druid.indexing.kafka.KafkaIndexTask) 6
Test (org.junit.Test) 6
KafkaPartitions (io.druid.indexing.kafka.KafkaPartitions) 2
TaskMaster (io.druid.indexing.overlord.TaskMaster) 2
Access (io.druid.server.security.Access) 2
Action (io.druid.server.security.Action) 2
AuthorizationInfo (io.druid.server.security.AuthorizationInfo) 2
Resource (io.druid.server.security.Resource) 2
Period (org.joda.time.Period) 2
Before (org.junit.Before) 2
Predicate (com.google.common.base.Predicate) 1
ImmutableList (com.google.common.collect.ImmutableList) 1
ImmutableMap (com.google.common.collect.ImmutableMap) 1
TaskLocation (io.druid.indexing.common.TaskLocation) 1
TaskStatus (io.druid.indexing.common.TaskStatus) 1
KafkaIOConfig (io.druid.indexing.kafka.KafkaIOConfig) 1