Example 1 with TaskLocation

Use of io.druid.indexing.common.TaskLocation in project druid by druid-io.

From class KafkaIndexTaskClient, method submitRequest:

private FullResponseHolder submitRequest(String id, HttpMethod method, String pathSuffix, String query, byte[] content, boolean retry) {
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
    while (true) {
        FullResponseHolder response = null;
        Request request = null;
        TaskLocation location = TaskLocation.unknown();
        String path = String.format("%s/%s/%s", BASE_PATH, id, pathSuffix);
        Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(id);
        if (!status.isPresent() || !status.get().isRunnable()) {
            throw new TaskNotRunnableException(String.format("Aborting request because task [%s] is not runnable", id));
        }
        try {
            location = taskInfoProvider.getTaskLocation(id);
            if (location.equals(TaskLocation.unknown())) {
                throw new NoTaskLocationException(String.format("No TaskLocation available for task [%s]", id));
            }
            // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
            // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
            checkConnection(location.getHost(), location.getPort());
            try {
                URI serviceUri = new URI("http", null, location.getHost(), location.getPort(), path, query, null);
                request = new Request(method, serviceUri.toURL());
                // used to validate that we are talking to the correct worker
                request.addHeader(ChatHandlerResource.TASK_ID_HEADER, id);
                if (content.length > 0) {
                    request.setContent(MediaType.APPLICATION_JSON, content);
                }
                log.debug("HTTP %s: %s", method.getName(), serviceUri.toString());
                response = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), httpTimeout).get();
            } catch (Exception e) {
                Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
                Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class);
                throw Throwables.propagate(e);
            }
            int responseCode = response.getStatus().getCode();
            if (responseCode / 100 == 2) {
                return response;
            } else if (responseCode == 400) {
                // don't bother retrying if it's a bad request
                throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
            } else {
                throw new IOException(String.format("Received status [%d]", responseCode));
            }
        } catch (IOException | ChannelException e) {
            // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
            // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
            // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
            // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
            // we will wait for a short period then retry the request indefinitely, expecting the task's location to
            // eventually be updated.
            final Duration delay;
            if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
                String headerId = response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER);
                if (headerId != null && !headerId.equals(id)) {
                    log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s", id, headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
                    delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
                } else {
                    delay = retryPolicy.getAndIncrementRetryDelay();
                }
            } else {
                delay = retryPolicy.getAndIncrementRetryDelay();
            }
            String urlForLog = (request != null ? request.getUrl().toString() : String.format("http://%s:%d%s", location.getHost(), location.getPort(), path));
            if (!retry) {
                // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
                // for informational purposes only) so don't log a scary stack trace
                log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
                Throwables.propagate(e);
            } else if (delay == null) {
                log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
                Throwables.propagate(e);
            } else {
                try {
                    final long sleepTime = delay.getMillis();
                    log.debug("Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])", (response != null ? response.getStatus().getCode() : "no response"), urlForLog, new Duration(sleepTime).toString(), (response != null ? response.getContent() : e.getMessage()));
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    Throwables.propagate(e2);
                }
            }
        } catch (NoTaskLocationException e) {
            log.info("No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or " + "may have already completed", id);
            throw e;
        } catch (Exception e) {
            log.warn(e, "Exception while sending request");
            throw e;
        }
    }
}
Also used: Request (com.metamx.http.client.Request), Duration (org.joda.time.Duration), IOException (java.io.IOException), TaskStatus (io.druid.indexing.common.TaskStatus), IAE (io.druid.java.util.common.IAE), URI (java.net.URI), TaskLocation (io.druid.indexing.common.TaskLocation), ChannelException (org.jboss.netty.channel.ChannelException), FullResponseHandler (com.metamx.http.client.response.FullResponseHandler), FullResponseHolder (com.metamx.http.client.response.FullResponseHolder), RetryPolicy (io.druid.indexing.common.RetryPolicy)
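
The loop above keeps retrying until RetryPolicy.getAndIncrementRetryDelay() returns null, which the code treats as "retries exhausted". A minimal, self-contained sketch of that backoff pattern follows; SimpleRetryPolicy is an illustrative stand-in with made-up delays, not Druid's actual RetryPolicy:

import java.io.IOException;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for Druid's RetryPolicy: doubles the delay on each
// call and returns null once all attempts have been handed out.
class SimpleRetryPolicy {
    private long delayMillis = 100;
    private int attemptsLeft = 5;

    Long getAndIncrementRetryDelay() {
        if (attemptsLeft-- <= 0) {
            return null; // signals "retries exhausted" to the caller
        }
        long current = delayMillis;
        delayMillis = Math.min(delayMillis * 2, 2_000); // cap the backoff
        return current;
    }
}

public class RetryLoopSketch {
    public static void main(String[] args) throws InterruptedException {
        SimpleRetryPolicy retryPolicy = new SimpleRetryPolicy();
        while (true) {
            try {
                throw new IOException("simulated transient failure");
            } catch (IOException e) {
                Long delay = retryPolicy.getAndIncrementRetryDelay();
                if (delay == null) {
                    System.out.println("Retries exhausted: " + e.getMessage());
                    return;
                }
                System.out.printf("Will try again in [%d] ms%n", delay);
                TimeUnit.MILLISECONDS.sleep(delay);
            }
        }
    }
}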

Example 2 with TaskLocation

Use of io.druid.indexing.common.TaskLocation in project druid by druid-io.

From class KafkaSupervisorTest, method testBeginPublishAndQueueNextTasks:

@Test
public void testBeginPublishAndQueueNextTasks() throws Exception {
    final TaskLocation location = new TaskLocation("testHost", 1234);
    supervisor = getSupervisor(2, 2, true, "PT1M", null);
    addSomeEvents(100);
    Capture<Task> captured = Capture.newInstance(CaptureType.ALL);
    expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
    expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();
    expect(taskRunner.getRunningTasks()).andReturn(Collections.EMPTY_LIST).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.<Task>of()).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(null)).anyTimes();
    expect(taskQueue.add(capture(captured))).andReturn(true).times(4);
    taskRunner.registerListener(anyObject(TaskRunnerListener.class), anyObject(Executor.class));
    replayAll();
    supervisor.start();
    supervisor.runInternal();
    verifyAll();
    List<Task> tasks = captured.getValues();
    Collection workItems = new ArrayList<>();
    for (Task task : tasks) {
        workItems.add(new TestTaskRunnerWorkItem(task.getId(), null, location));
    }
    reset(taskStorage, taskRunner, taskClient, taskQueue);
    captured = Capture.newInstance(CaptureType.ALL);
    expect(taskStorage.getActiveTasks()).andReturn(tasks).anyTimes();
    for (Task task : tasks) {
        expect(taskStorage.getStatus(task.getId())).andReturn(Optional.of(TaskStatus.running(task.getId()))).anyTimes();
        expect(taskStorage.getTask(task.getId())).andReturn(Optional.of(task)).anyTimes();
    }
    expect(taskRunner.getRunningTasks()).andReturn(workItems).anyTimes();
    expect(taskClient.getStatusAsync(anyString())).andReturn(Futures.immediateFuture(KafkaIndexTask.Status.READING)).anyTimes();
    expect(taskClient.getStartTimeAsync(EasyMock.contains("sequenceName-0")))
        .andReturn(Futures.immediateFuture(DateTime.now().minusMinutes(2)))
        .andReturn(Futures.immediateFuture(DateTime.now()));
    expect(taskClient.getStartTimeAsync(EasyMock.contains("sequenceName-1")))
        .andReturn(Futures.immediateFuture(DateTime.now()))
        .times(2);
    expect(taskClient.pauseAsync(EasyMock.contains("sequenceName-0")))
        .andReturn(Futures.immediateFuture((Map<Integer, Long>) ImmutableMap.of(0, 10L, 1, 20L, 2, 30L)))
        .andReturn(Futures.immediateFuture((Map<Integer, Long>) ImmutableMap.of(0, 10L, 1, 15L, 2, 35L)));
    expect(taskClient.setEndOffsetsAsync(EasyMock.contains("sequenceName-0"), EasyMock.eq(ImmutableMap.of(0, 10L, 1, 20L, 2, 35L)), EasyMock.eq(true)))
        .andReturn(Futures.immediateFuture(true))
        .times(2);
    expect(taskQueue.add(capture(captured))).andReturn(true).times(2);
    replay(taskStorage, taskRunner, taskClient, taskQueue);
    supervisor.runInternal();
    verifyAll();
    for (Task task : captured.getValues()) {
        KafkaIndexTask kafkaIndexTask = (KafkaIndexTask) task;
        Assert.assertEquals(dataSchema, kafkaIndexTask.getDataSchema());
        Assert.assertEquals(KafkaTuningConfig.copyOf(tuningConfig), kafkaIndexTask.getTuningConfig());
        KafkaIOConfig taskConfig = kafkaIndexTask.getIOConfig();
        Assert.assertEquals("sequenceName-0", taskConfig.getBaseSequenceName());
        Assert.assertTrue("isUseTransaction", taskConfig.isUseTransaction());
        Assert.assertFalse("pauseAfterRead", taskConfig.isPauseAfterRead());
        Assert.assertEquals(KAFKA_TOPIC, taskConfig.getStartPartitions().getTopic());
        Assert.assertEquals(10L, (long) taskConfig.getStartPartitions().getPartitionOffsetMap().get(0));
        Assert.assertEquals(20L, (long) taskConfig.getStartPartitions().getPartitionOffsetMap().get(1));
        Assert.assertEquals(35L, (long) taskConfig.getStartPartitions().getPartitionOffsetMap().get(2));
    }
}
Also used: TaskRunnerListener (io.druid.indexing.overlord.TaskRunnerListener), RealtimeIndexTask (io.druid.indexing.common.task.RealtimeIndexTask), Task (io.druid.indexing.common.task.Task), KafkaIndexTask (io.druid.indexing.kafka.KafkaIndexTask), ArrayList (java.util.ArrayList), TaskLocation (io.druid.indexing.common.TaskLocation), Executor (java.util.concurrent.Executor), KafkaIOConfig (io.druid.indexing.kafka.KafkaIOConfig), Collection (java.util.Collection), KafkaDataSourceMetadata (io.druid.indexing.kafka.KafkaDataSourceMetadata), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Test (org.junit.Test)
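
Both passes of this test rely on EasyMock's CaptureType.ALL, which records the argument of every matched call so that captured.getValues() can return all created tasks at once. A self-contained sketch of just that mechanism; TaskSink is a hypothetical stand-in for Druid's TaskQueue:

import static org.easymock.EasyMock.capture;
import static org.easymock.EasyMock.createMock;
import static org.easymock.EasyMock.expect;
import static org.easymock.EasyMock.replay;
import static org.easymock.EasyMock.verify;

import java.util.List;
import org.easymock.Capture;
import org.easymock.CaptureType;

// Hypothetical stand-in for Druid's TaskQueue.
interface TaskSink {
    boolean add(String taskId);
}

public class CaptureAllSketch {
    public static void main(String[] args) {
        TaskSink sink = createMock(TaskSink.class);
        // CaptureType.ALL keeps every argument seen, not just the last one.
        Capture<String> captured = Capture.newInstance(CaptureType.ALL);
        expect(sink.add(capture(captured))).andReturn(true).times(2);
        replay(sink);

        sink.add("index_task_0");
        sink.add("index_task_1");

        verify(sink);
        List<String> values = captured.getValues();
        System.out.println(values); // prints [index_task_0, index_task_1]
    }
}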

Example 3 with TaskLocation

Use of io.druid.indexing.common.TaskLocation in project druid by druid-io.

From class KafkaSupervisorTest, method testDiscoverExistingPublishingTask:

@Test
public void testDiscoverExistingPublishingTask() throws Exception {
    final TaskLocation location = new TaskLocation("testHost", 1234);
    supervisor = getSupervisor(1, 1, true, "PT1H", null);
    addSomeEvents(1);
    Task task = createKafkaIndexTask(
        "id1",
        DATASOURCE,
        "sequenceName-0",
        new KafkaPartitions("topic", ImmutableMap.of(0, 0L, 1, 0L, 2, 0L)),
        new KafkaPartitions("topic", ImmutableMap.of(0, Long.MAX_VALUE, 1, Long.MAX_VALUE, 2, Long.MAX_VALUE)),
        null
    );
    Collection workItems = new ArrayList<>();
    workItems.add(new TestTaskRunnerWorkItem(task.getId(), null, location));
    Capture<KafkaIndexTask> captured = Capture.newInstance();
    expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
    expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();
    expect(taskRunner.getRunningTasks()).andReturn(workItems).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.of(task)).anyTimes();
    expect(taskStorage.getStatus("id1")).andReturn(Optional.of(TaskStatus.running("id1"))).anyTimes();
    expect(taskStorage.getTask("id1")).andReturn(Optional.of(task)).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(null)).anyTimes();
    expect(taskClient.getStatusAsync("id1")).andReturn(Futures.immediateFuture(KafkaIndexTask.Status.PUBLISHING));
    expect(taskClient.getCurrentOffsetsAsync("id1", false)).andReturn(Futures.immediateFuture((Map<Integer, Long>) ImmutableMap.of(0, 10L, 1, 20L, 2, 30L)));
    expect(taskClient.getCurrentOffsets("id1", true)).andReturn(ImmutableMap.of(0, 10L, 1, 20L, 2, 30L));
    expect(taskQueue.add(capture(captured))).andReturn(true);
    taskRunner.registerListener(anyObject(TaskRunnerListener.class), anyObject(Executor.class));
    replayAll();
    supervisor.start();
    supervisor.runInternal();
    SupervisorReport report = supervisor.getStatus();
    verifyAll();
    Assert.assertEquals(DATASOURCE, report.getId());
    Assert.assertTrue(report.getPayload() instanceof KafkaSupervisorReport.KafkaSupervisorReportPayload);
    KafkaSupervisorReport.KafkaSupervisorReportPayload payload = (KafkaSupervisorReport.KafkaSupervisorReportPayload) report.getPayload();
    Assert.assertEquals(DATASOURCE, payload.getDataSource());
    Assert.assertEquals(3600L, (long) payload.getDurationSeconds());
    Assert.assertEquals(NUM_PARTITIONS, (int) payload.getPartitions());
    Assert.assertEquals(1, (int) payload.getReplicas());
    Assert.assertEquals(KAFKA_TOPIC, payload.getTopic());
    Assert.assertEquals(0, payload.getActiveTasks().size());
    Assert.assertEquals(1, payload.getPublishingTasks().size());
    TaskReportData publishingReport = payload.getPublishingTasks().get(0);
    Assert.assertEquals("id1", publishingReport.getId());
    Assert.assertEquals(ImmutableMap.of(0, 0L, 1, 0L, 2, 0L), publishingReport.getStartingOffsets());
    Assert.assertEquals(ImmutableMap.of(0, 10L, 1, 20L, 2, 30L), publishingReport.getCurrentOffsets());
    KafkaIndexTask capturedTask = captured.getValue();
    Assert.assertEquals(dataSchema, capturedTask.getDataSchema());
    Assert.assertEquals(KafkaTuningConfig.copyOf(tuningConfig), capturedTask.getTuningConfig());
    KafkaIOConfig capturedTaskConfig = capturedTask.getIOConfig();
    Assert.assertEquals(kafkaHost, capturedTaskConfig.getConsumerProperties().get("bootstrap.servers"));
    Assert.assertEquals("myCustomValue", capturedTaskConfig.getConsumerProperties().get("myCustomKey"));
    Assert.assertEquals("sequenceName-0", capturedTaskConfig.getBaseSequenceName());
    Assert.assertTrue("isUseTransaction", capturedTaskConfig.isUseTransaction());
    Assert.assertFalse("pauseAfterRead", capturedTaskConfig.isPauseAfterRead());
    // check that the new task was created with starting offsets matching where the publishing task finished
    Assert.assertEquals(KAFKA_TOPIC, capturedTaskConfig.getStartPartitions().getTopic());
    Assert.assertEquals(10L, (long) capturedTaskConfig.getStartPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(20L, (long) capturedTaskConfig.getStartPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(30L, (long) capturedTaskConfig.getStartPartitions().getPartitionOffsetMap().get(2));
    Assert.assertEquals(KAFKA_TOPIC, capturedTaskConfig.getEndPartitions().getTopic());
    Assert.assertEquals(Long.MAX_VALUE, (long) capturedTaskConfig.getEndPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(Long.MAX_VALUE, (long) capturedTaskConfig.getEndPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(Long.MAX_VALUE, (long) capturedTaskConfig.getEndPartitions().getPartitionOffsetMap().get(2));
}
Also used: TaskRunnerListener (io.druid.indexing.overlord.TaskRunnerListener), RealtimeIndexTask (io.druid.indexing.common.task.RealtimeIndexTask), Task (io.druid.indexing.common.task.Task), KafkaIndexTask (io.druid.indexing.kafka.KafkaIndexTask), KafkaPartitions (io.druid.indexing.kafka.KafkaPartitions), ArrayList (java.util.ArrayList), TaskLocation (io.druid.indexing.common.TaskLocation), Executor (java.util.concurrent.Executor), SupervisorReport (io.druid.indexing.overlord.supervisor.SupervisorReport), KafkaIOConfig (io.druid.indexing.kafka.KafkaIOConfig), Collection (java.util.Collection), KafkaDataSourceMetadata (io.druid.indexing.kafka.KafkaDataSourceMetadata), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Test (org.junit.Test)
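
The assertions at the end verify the offset handoff: the replacement task's start offsets equal the publishing task's current offsets. A minimal sketch of that bookkeeping with plain java.util maps (the method name is illustrative; Druid wraps these maps in KafkaPartitions):

import java.util.Map;
import java.util.TreeMap;

public class OffsetHandoffSketch {
    // Start the next task wherever the publishing task currently is; any
    // partition the publishing task did not report keeps its previous start.
    static Map<Integer, Long> nextStartOffsets(Map<Integer, Long> previousStart,
                                               Map<Integer, Long> publishingCurrent) {
        Map<Integer, Long> next = new TreeMap<>(previousStart);
        next.putAll(publishingCurrent);
        return next;
    }

    public static void main(String[] args) {
        Map<Integer, Long> previousStart = new TreeMap<>();
        previousStart.put(0, 0L);
        previousStart.put(1, 0L);
        previousStart.put(2, 0L);

        Map<Integer, Long> publishingCurrent = new TreeMap<>();
        publishingCurrent.put(0, 10L);
        publishingCurrent.put(1, 20L);
        publishingCurrent.put(2, 30L);

        // Prints {0=10, 1=20, 2=30}, matching the startPartitions assertions.
        System.out.println(nextStartOffsets(previousStart, publishingCurrent));
    }
}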

Example 4 with TaskLocation

Use of io.druid.indexing.common.TaskLocation in project druid by druid-io.

From class KafkaSupervisorTest, method testDiscoverExistingPublishingTaskWithDifferentPartitionAllocation:

@Test
public void testDiscoverExistingPublishingTaskWithDifferentPartitionAllocation() throws Exception {
    final TaskLocation location = new TaskLocation("testHost", 1234);
    supervisor = getSupervisor(1, 1, true, "PT1H", null);
    addSomeEvents(1);
    Task task = createKafkaIndexTask(
        "id1",
        DATASOURCE,
        "sequenceName-0",
        new KafkaPartitions("topic", ImmutableMap.of(0, 0L, 2, 0L)),
        new KafkaPartitions("topic", ImmutableMap.of(0, Long.MAX_VALUE, 2, Long.MAX_VALUE)),
        null
    );
    Collection workItems = new ArrayList<>();
    workItems.add(new TestTaskRunnerWorkItem(task.getId(), null, location));
    Capture<KafkaIndexTask> captured = Capture.newInstance();
    expect(taskMaster.getTaskQueue()).andReturn(Optional.of(taskQueue)).anyTimes();
    expect(taskMaster.getTaskRunner()).andReturn(Optional.of(taskRunner)).anyTimes();
    expect(taskRunner.getRunningTasks()).andReturn(workItems).anyTimes();
    expect(taskStorage.getActiveTasks()).andReturn(ImmutableList.of(task)).anyTimes();
    expect(taskStorage.getStatus("id1")).andReturn(Optional.of(TaskStatus.running("id1"))).anyTimes();
    expect(taskStorage.getTask("id1")).andReturn(Optional.of(task)).anyTimes();
    expect(indexerMetadataStorageCoordinator.getDataSourceMetadata(DATASOURCE)).andReturn(new KafkaDataSourceMetadata(null)).anyTimes();
    expect(taskClient.getStatusAsync("id1")).andReturn(Futures.immediateFuture(KafkaIndexTask.Status.PUBLISHING));
    expect(taskClient.getCurrentOffsetsAsync("id1", false)).andReturn(Futures.immediateFuture((Map<Integer, Long>) ImmutableMap.of(0, 10L, 2, 30L)));
    expect(taskClient.getCurrentOffsets("id1", true)).andReturn(ImmutableMap.of(0, 10L, 2, 30L));
    expect(taskQueue.add(capture(captured))).andReturn(true);
    taskRunner.registerListener(anyObject(TaskRunnerListener.class), anyObject(Executor.class));
    replayAll();
    supervisor.start();
    supervisor.runInternal();
    SupervisorReport report = supervisor.getStatus();
    verifyAll();
    Assert.assertEquals(DATASOURCE, report.getId());
    Assert.assertTrue(report.getPayload() instanceof KafkaSupervisorReport.KafkaSupervisorReportPayload);
    KafkaSupervisorReport.KafkaSupervisorReportPayload payload = (KafkaSupervisorReport.KafkaSupervisorReportPayload) report.getPayload();
    Assert.assertEquals(DATASOURCE, payload.getDataSource());
    Assert.assertEquals(3600L, (long) payload.getDurationSeconds());
    Assert.assertEquals(NUM_PARTITIONS, (int) payload.getPartitions());
    Assert.assertEquals(1, (int) payload.getReplicas());
    Assert.assertEquals(KAFKA_TOPIC, payload.getTopic());
    Assert.assertEquals(0, payload.getActiveTasks().size());
    Assert.assertEquals(1, payload.getPublishingTasks().size());
    TaskReportData publishingReport = payload.getPublishingTasks().get(0);
    Assert.assertEquals("id1", publishingReport.getId());
    Assert.assertEquals(ImmutableMap.of(0, 0L, 2, 0L), publishingReport.getStartingOffsets());
    Assert.assertEquals(ImmutableMap.of(0, 10L, 2, 30L), publishingReport.getCurrentOffsets());
    KafkaIndexTask capturedTask = captured.getValue();
    Assert.assertEquals(dataSchema, capturedTask.getDataSchema());
    Assert.assertEquals(KafkaTuningConfig.copyOf(tuningConfig), capturedTask.getTuningConfig());
    KafkaIOConfig capturedTaskConfig = capturedTask.getIOConfig();
    Assert.assertEquals(kafkaHost, capturedTaskConfig.getConsumerProperties().get("bootstrap.servers"));
    Assert.assertEquals("myCustomValue", capturedTaskConfig.getConsumerProperties().get("myCustomKey"));
    Assert.assertEquals("sequenceName-0", capturedTaskConfig.getBaseSequenceName());
    Assert.assertTrue("isUseTransaction", capturedTaskConfig.isUseTransaction());
    Assert.assertFalse("pauseAfterRead", capturedTaskConfig.isPauseAfterRead());
    // check that the new task was created with starting offsets matching where the publishing task finished
    Assert.assertEquals(KAFKA_TOPIC, capturedTaskConfig.getStartPartitions().getTopic());
    Assert.assertEquals(10L, (long) capturedTaskConfig.getStartPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(0L, (long) capturedTaskConfig.getStartPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(30L, (long) capturedTaskConfig.getStartPartitions().getPartitionOffsetMap().get(2));
    Assert.assertEquals(KAFKA_TOPIC, capturedTaskConfig.getEndPartitions().getTopic());
    Assert.assertEquals(Long.MAX_VALUE, (long) capturedTaskConfig.getEndPartitions().getPartitionOffsetMap().get(0));
    Assert.assertEquals(Long.MAX_VALUE, (long) capturedTaskConfig.getEndPartitions().getPartitionOffsetMap().get(1));
    Assert.assertEquals(Long.MAX_VALUE, (long) capturedTaskConfig.getEndPartitions().getPartitionOffsetMap().get(2));
}
Also used: TaskRunnerListener (io.druid.indexing.overlord.TaskRunnerListener), RealtimeIndexTask (io.druid.indexing.common.task.RealtimeIndexTask), Task (io.druid.indexing.common.task.Task), KafkaIndexTask (io.druid.indexing.kafka.KafkaIndexTask), KafkaPartitions (io.druid.indexing.kafka.KafkaPartitions), ArrayList (java.util.ArrayList), TaskLocation (io.druid.indexing.common.TaskLocation), Executor (java.util.concurrent.Executor), SupervisorReport (io.druid.indexing.overlord.supervisor.SupervisorReport), KafkaIOConfig (io.druid.indexing.kafka.KafkaIOConfig), Collection (java.util.Collection), KafkaDataSourceMetadata (io.druid.indexing.kafka.KafkaDataSourceMetadata), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), Test (org.junit.Test)
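
This variant exercises the fallback path: the publishing task only reported partitions 0 and 2, so partition 1 keeps its previous start offset of 0, exactly as the assertions require. The merge sketched after Example 3 covers this case too; spelled out with this example's numbers (still illustrative, not Druid's actual code):

import java.util.Map;
import java.util.TreeMap;

public class FallbackOffsetSketch {
    public static void main(String[] args) {
        Map<Integer, Long> next = new TreeMap<>();
        next.put(0, 0L); // previous start offsets for all three partitions
        next.put(1, 0L);
        next.put(2, 0L);

        // Overlay only what the publishing task reported (partitions 0 and 2);
        // partition 1 is untouched and falls back to its previous start.
        next.put(0, 10L);
        next.put(2, 30L);

        System.out.println(next); // prints {0=10, 1=0, 2=30}
    }
}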

Example 5 with TaskLocation

Use of io.druid.indexing.common.TaskLocation in project druid by druid-io.

From class KafkaSupervisor, method possiblyRegisterListener:

public void possiblyRegisterListener() {
    if (listenerRegistered) {
        return;
    }
    Optional<TaskRunner> taskRunner = taskMaster.getTaskRunner();
    if (taskRunner.isPresent()) {
        taskRunner.get().registerListener(new TaskRunnerListener() {

            @Override
            public String getListenerId() {
                return supervisorId;
            }

            @Override
            public void locationChanged(final String taskId, final TaskLocation newLocation) {
                // do nothing
            }

            @Override
            public void statusChanged(String taskId, TaskStatus status) {
                notices.add(new RunNotice());
            }
        }, MoreExecutors.sameThreadExecutor());
        listenerRegistered = true;
    }
}
Also used: TaskRunnerListener (io.druid.indexing.overlord.TaskRunnerListener), TaskStatus (io.druid.indexing.common.TaskStatus), TaskLocation (io.druid.indexing.common.TaskLocation), TaskRunner (io.druid.indexing.overlord.TaskRunner)
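
Note the executor passed to registerListener: MoreExecutors.sameThreadExecutor() makes statusChanged run synchronously on whichever thread reports the change, so the RunNotice is queued before that thread moves on. A small sketch of that executor behavior, using Guava's directExecutor(), which later replaced the deprecated sameThreadExecutor():

import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executor;

public class DirectExecutorSketch {
    public static void main(String[] args) {
        // Runs each submitted task inline on the submitting thread, the same
        // behavior the supervisor relies on when registering its listener.
        Executor executor = MoreExecutors.directExecutor();
        System.out.println("caller thread:   " + Thread.currentThread().getName());
        executor.execute(() ->
            System.out.println("callback thread: " + Thread.currentThread().getName()));
        // Both lines print the same thread name (typically "main").
    }
}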

Aggregations

TaskLocation (io.druid.indexing.common.TaskLocation): 10 usages
TaskRunnerListener (io.druid.indexing.overlord.TaskRunnerListener): 9 usages
RealtimeIndexTask (io.druid.indexing.common.task.RealtimeIndexTask): 8 usages
Task (io.druid.indexing.common.task.Task): 8 usages
KafkaDataSourceMetadata (io.druid.indexing.kafka.KafkaDataSourceMetadata): 8 usages
KafkaIndexTask (io.druid.indexing.kafka.KafkaIndexTask): 8 usages
ArrayList (java.util.ArrayList): 8 usages
Collection (java.util.Collection): 8 usages
Executor (java.util.concurrent.Executor): 8 usages
Test (org.junit.Test): 8 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 7 usages
Map (java.util.Map): 7 usages
KafkaIOConfig (io.druid.indexing.kafka.KafkaIOConfig): 5 usages
KafkaPartitions (io.druid.indexing.kafka.KafkaPartitions): 5 usages
SupervisorReport (io.druid.indexing.overlord.supervisor.SupervisorReport): 3 usages
DateTime (org.joda.time.DateTime): 3 usages
TaskStatus (io.druid.indexing.common.TaskStatus): 2 usages
Request (com.metamx.http.client.Request): 1 usage
FullResponseHandler (com.metamx.http.client.response.FullResponseHandler): 1 usage
FullResponseHolder (com.metamx.http.client.response.FullResponseHolder): 1 usage