Example 1 with TaskStatus

Use of io.druid.indexing.common.TaskStatus in project druid by druid-io.

The class KafkaIndexTaskClient, method submitRequest.

private FullResponseHolder submitRequest(String id, HttpMethod method, String pathSuffix, String query, byte[] content, boolean retry) {
    final RetryPolicy retryPolicy = retryPolicyFactory.makeRetryPolicy();
    while (true) {
        FullResponseHolder response = null;
        Request request = null;
        TaskLocation location = TaskLocation.unknown();
        String path = String.format("%s/%s/%s", BASE_PATH, id, pathSuffix);
        Optional<TaskStatus> status = taskInfoProvider.getTaskStatus(id);
        if (!status.isPresent() || !status.get().isRunnable()) {
            throw new TaskNotRunnableException(String.format("Aborting request because task [%s] is not runnable", id));
        }
        try {
            location = taskInfoProvider.getTaskLocation(id);
            if (location.equals(TaskLocation.unknown())) {
                throw new NoTaskLocationException(String.format("No TaskLocation available for task [%s]", id));
            }
            // Netty throws some annoying exceptions if a connection can't be opened, which happens relatively frequently
            // for tasks that happen to still be starting up, so test the connection first to keep the logs clean.
            checkConnection(location.getHost(), location.getPort());
            try {
                URI serviceUri = new URI("http", null, location.getHost(), location.getPort(), path, query, null);
                request = new Request(method, serviceUri.toURL());
                // used to validate that we are talking to the correct worker
                request.addHeader(ChatHandlerResource.TASK_ID_HEADER, id);
                if (content.length > 0) {
                    request.setContent(MediaType.APPLICATION_JSON, content);
                }
                log.debug("HTTP %s: %s", method.getName(), serviceUri.toString());
                response = httpClient.go(request, new FullResponseHandler(Charsets.UTF_8), httpTimeout).get();
            } catch (Exception e) {
                Throwables.propagateIfInstanceOf(e.getCause(), IOException.class);
                Throwables.propagateIfInstanceOf(e.getCause(), ChannelException.class);
                throw Throwables.propagate(e);
            }
            int responseCode = response.getStatus().getCode();
            if (responseCode / 100 == 2) {
                return response;
            } else if (responseCode == 400) {
                // don't bother retrying if it's a bad request
                throw new IAE("Received 400 Bad Request with body: %s", response.getContent());
            } else {
                throw new IOException(String.format("Received status [%d]", responseCode));
            }
        } catch (IOException | ChannelException e) {
            // Since workers are free to move tasks around to different ports, there is a chance that a task may have been
            // moved but our view of its location has not been updated yet from ZK. To detect this case, we send a header
            // identifying our expected recipient in the request; if this doesn't correspond to the worker we messaged, the
            // worker will return an HTTP 404 with its ID in the response header. If we get a mismatching task ID, then
            // we will wait for a short period then retry the request indefinitely, expecting the task's location to
            // eventually be updated.
            final Duration delay;
            if (response != null && response.getStatus().equals(HttpResponseStatus.NOT_FOUND)) {
                String headerId = response.getResponse().headers().get(ChatHandlerResource.TASK_ID_HEADER);
                if (headerId != null && !headerId.equals(id)) {
                    log.warn("Expected worker to have taskId [%s] but has taskId [%s], will retry in [%d]s", id, headerId, TASK_MISMATCH_RETRY_DELAY_SECONDS);
                    delay = Duration.standardSeconds(TASK_MISMATCH_RETRY_DELAY_SECONDS);
                } else {
                    delay = retryPolicy.getAndIncrementRetryDelay();
                }
            } else {
                delay = retryPolicy.getAndIncrementRetryDelay();
            }
            String urlForLog = (request != null ? request.getUrl().toString() : String.format("http://%s:%d%s", location.getHost(), location.getPort(), path));
            if (!retry) {
                // if retry=false, we probably aren't too concerned if the operation doesn't succeed (i.e. the request was
                // for informational purposes only) so don't log a scary stack trace
                log.info("submitRequest failed for [%s], with message [%s]", urlForLog, e.getMessage());
                Throwables.propagate(e);
            } else if (delay == null) {
                log.warn(e, "Retries exhausted for [%s], last exception:", urlForLog);
                Throwables.propagate(e);
            } else {
                try {
                    final long sleepTime = delay.getMillis();
                    log.debug("Bad response HTTP [%s] from [%s]; will try again in [%s] (body/exception: [%s])", (response != null ? response.getStatus().getCode() : "no response"), urlForLog, new Duration(sleepTime).toString(), (response != null ? response.getContent() : e.getMessage()));
                    Thread.sleep(sleepTime);
                } catch (InterruptedException e2) {
                    Throwables.propagate(e2);
                }
            }
        } catch (NoTaskLocationException e) {
            log.info("No TaskLocation available for task [%s], this task may not have been assigned to a worker yet or " + "may have already completed", id);
            throw e;
        } catch (Exception e) {
            log.warn(e, "Exception while sending request");
            throw e;
        }
    }
}
Also used : Request (com.metamx.http.client.Request), Duration (org.joda.time.Duration), IOException (java.io.IOException), TaskStatus (io.druid.indexing.common.TaskStatus), IAE (io.druid.java.util.common.IAE), URI (java.net.URI), TaskLocation (io.druid.indexing.common.TaskLocation), ChannelException (org.jboss.netty.channel.ChannelException), FullResponseHandler (com.metamx.http.client.response.FullResponseHandler), FullResponseHolder (com.metamx.http.client.response.FullResponseHolder), RetryPolicy (io.druid.indexing.common.RetryPolicy)
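
A note on the control flow above: the loop never exits on its own; the RetryPolicy signals exhaustion by returning a null delay, at which point the last exception is rethrown. A minimal, self-contained sketch of that retry shape in plain Java, where SimpleRetryPolicy is a hypothetical stand-in for io.druid.indexing.common.RetryPolicy (initial delay and retry budget are assumed values):

import java.io.IOException;
import java.util.concurrent.Callable;

// Hypothetical stand-in for Druid's RetryPolicy: yields the next backoff delay
// in milliseconds, or -1 once the retry budget is spent (Druid returns null).
class SimpleRetryPolicy {
    private long delayMs = 1_000;  // assumed initial delay
    private int retriesLeft = 8;   // assumed retry budget

    long getAndIncrementRetryDelay() {
        if (retriesLeft-- <= 0) {
            return -1;
        }
        final long current = delayMs;
        delayMs = Math.min(delayMs * 2, 60_000);  // exponential backoff, capped at 60s
        return current;
    }
}

class RetryLoop {
    static <T> T submitWithRetries(Callable<T> call, SimpleRetryPolicy policy) throws Exception {
        while (true) {
            try {
                return call.call();
            } catch (IOException e) {
                final long delay = policy.getAndIncrementRetryDelay();
                if (delay < 0) {
                    throw e;  // retries exhausted: surface the last failure
                }
                Thread.sleep(delay);  // back off, then loop and try again
            }
        }
    }
}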

Example 2 with TaskStatus

Use of io.druid.indexing.common.TaskStatus in project druid by druid-io.

The class KafkaIndexTaskTest, method testRunConflictingWithoutTransactions.

@Test(timeout = 60_000L)
public void testRunConflictingWithoutTransactions() throws Exception {
    final KafkaIndexTask task1 = createTask(
        null,
        new KafkaIOConfig("sequence0",
            new KafkaPartitions("topic0", ImmutableMap.of(0, 2L)),
            new KafkaPartitions("topic0", ImmutableMap.of(0, 5L)),
            kafkaServer.consumerProperties(), false, false, null),
        null, null);
    final KafkaIndexTask task2 = createTask(
        null,
        new KafkaIOConfig("sequence1",
            new KafkaPartitions("topic0", ImmutableMap.of(0, 3L)),
            new KafkaPartitions("topic0", ImmutableMap.of(0, 8L)),
            kafkaServer.consumerProperties(), false, false, null),
        null, null);
    // Insert data
    try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
        for (ProducerRecord<byte[], byte[]> record : RECORDS) {
            kafkaProducer.send(record).get();
        }
    }
    // Run first task
    final ListenableFuture<TaskStatus> future1 = runTask(task1);
    Assert.assertEquals(TaskStatus.Status.SUCCESS, future1.get().getStatusCode());
    // Check published segments & metadata
    SegmentDescriptor desc1 = SD(task1, "2010/P1D", 0);
    SegmentDescriptor desc2 = SD(task1, "2011/P1D", 0);
    Assert.assertEquals(ImmutableSet.of(desc1, desc2), publishedDescriptors());
    Assert.assertNull(metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()));
    // Run second task
    final ListenableFuture<TaskStatus> future2 = runTask(task2);
    Assert.assertEquals(TaskStatus.Status.SUCCESS, future2.get().getStatusCode());
    // Check metrics
    Assert.assertEquals(3, task1.getFireDepartmentMetrics().processed());
    Assert.assertEquals(0, task1.getFireDepartmentMetrics().unparseable());
    Assert.assertEquals(0, task1.getFireDepartmentMetrics().thrownAway());
    Assert.assertEquals(3, task2.getFireDepartmentMetrics().processed());
    Assert.assertEquals(2, task2.getFireDepartmentMetrics().unparseable());
    Assert.assertEquals(0, task2.getFireDepartmentMetrics().thrownAway());
    // Check published segments & metadata
    SegmentDescriptor desc3 = SD(task2, "2011/P1D", 1);
    SegmentDescriptor desc4 = SD(task2, "2013/P1D", 0);
    Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3, desc4), publishedDescriptors());
    Assert.assertNull(metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()));
    // Check segments in deep storage
    Assert.assertEquals(ImmutableList.of("c"), readSegmentDim1(desc1));
    Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentDim1(desc2));
    Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentDim1(desc3));
    Assert.assertEquals(ImmutableList.of("f"), readSegmentDim1(desc4));
}
Also used : SegmentDescriptor (io.druid.query.SegmentDescriptor), TaskStatus (io.druid.indexing.common.TaskStatus), Test (org.junit.Test)
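
Both KafkaIOConfigs in this test are built with their boolean flags set to false, so the tasks publish without a transactional metadata check; that is consistent with the null getDataSourceMetadata assertions, and it is why task2's overlapping 2011/P1D data lands as an extra shard (partition 1) alongside task1's partition 0 rather than replacing it. The SD helper is defined elsewhere in KafkaIndexTaskTest; a plausible sketch of its shape, assuming the version string comes from the task's interval lock and passing it in as a parameter here:

import io.druid.query.SegmentDescriptor;
import org.joda.time.Interval;

// Hypothetical reconstruction of the SD helper: build a descriptor for one
// shard of one interval. The partition number is what separates task2's
// conflicting 2011/P1D shard (1) from task1's original shard (0).
static SegmentDescriptor sd(String intervalString, String lockVersion, int partitionNum) {
    return new SegmentDescriptor(new Interval(intervalString), lockVersion, partitionNum);
}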

Example 3 with TaskStatus

Use of io.druid.indexing.common.TaskStatus in project druid by druid-io.

The class KafkaIndexTaskTest, method testRunTwoTasksTwoPartitions.

@Test(timeout = 60_000L)
public void testRunTwoTasksTwoPartitions() throws Exception {
    final KafkaIndexTask task1 = createTask(
        null,
        new KafkaIOConfig("sequence0",
            new KafkaPartitions("topic0", ImmutableMap.of(0, 2L)),
            new KafkaPartitions("topic0", ImmutableMap.of(0, 5L)),
            kafkaServer.consumerProperties(), true, false, null),
        null, null);
    final KafkaIndexTask task2 = createTask(
        null,
        new KafkaIOConfig("sequence1",
            new KafkaPartitions("topic0", ImmutableMap.of(1, 0L)),
            new KafkaPartitions("topic0", ImmutableMap.of(1, 1L)),
            kafkaServer.consumerProperties(), true, false, null),
        null, null);
    final ListenableFuture<TaskStatus> future1 = runTask(task1);
    final ListenableFuture<TaskStatus> future2 = runTask(task2);
    // Insert data
    try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
        for (ProducerRecord<byte[], byte[]> record : RECORDS) {
            kafkaProducer.send(record).get();
        }
    }
    // Wait for tasks to exit
    Assert.assertEquals(TaskStatus.Status.SUCCESS, future1.get().getStatusCode());
    Assert.assertEquals(TaskStatus.Status.SUCCESS, future2.get().getStatusCode());
    // Check metrics
    Assert.assertEquals(3, task1.getFireDepartmentMetrics().processed());
    Assert.assertEquals(0, task1.getFireDepartmentMetrics().unparseable());
    Assert.assertEquals(0, task1.getFireDepartmentMetrics().thrownAway());
    Assert.assertEquals(1, task2.getFireDepartmentMetrics().processed());
    Assert.assertEquals(0, task2.getFireDepartmentMetrics().unparseable());
    Assert.assertEquals(0, task2.getFireDepartmentMetrics().thrownAway());
    // Check published segments & metadata
    SegmentDescriptor desc1 = SD(task1, "2010/P1D", 0);
    SegmentDescriptor desc2 = SD(task1, "2011/P1D", 0);
    SegmentDescriptor desc3 = SD(task2, "2012/P1D", 0);
    Assert.assertEquals(ImmutableSet.of(desc1, desc2, desc3), publishedDescriptors());
    Assert.assertEquals(
        new KafkaDataSourceMetadata(new KafkaPartitions("topic0", ImmutableMap.of(0, 5L, 1, 1L))),
        metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()));
    // Check segments in deep storage
    Assert.assertEquals(ImmutableList.of("c"), readSegmentDim1(desc1));
    Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentDim1(desc2));
    Assert.assertEquals(ImmutableList.of("g"), readSegmentDim1(desc3));
}
Also used : SegmentDescriptor (io.druid.query.SegmentDescriptor), TaskStatus (io.druid.indexing.common.TaskStatus), Test (org.junit.Test)
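
With one task per Kafka partition, each publish contributes the end offsets for its own partition, and the committed metadata ends up covering both, which is what the KafkaDataSourceMetadata assertion checks. A short sketch of that merge, assuming KafkaDataSourceMetadata.plus combines per-partition offset maps (an assumption about this internal API, consistent with the asserted result):

import com.google.common.collect.ImmutableMap;
import io.druid.indexing.kafka.KafkaDataSourceMetadata;
import io.druid.indexing.kafka.KafkaPartitions;
import io.druid.indexing.overlord.DataSourceMetadata;

// task1 committed offsets for partition 0, task2 for partition 1.
DataSourceMetadata fromTask1 =
    new KafkaDataSourceMetadata(new KafkaPartitions("topic0", ImmutableMap.of(0, 5L)));
DataSourceMetadata fromTask2 =
    new KafkaDataSourceMetadata(new KafkaPartitions("topic0", ImmutableMap.of(1, 1L)));

// Expected to equal the asserted metadata with offsets {0: 5, 1: 1}.
DataSourceMetadata merged = fromTask1.plus(fromTask2);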

Example 4 with TaskStatus

Use of io.druid.indexing.common.TaskStatus in project druid by druid-io.

The class KafkaIndexTaskTest, method testRunReplicas.

@Test(timeout = 60_000L)
public void testRunReplicas() throws Exception {
    final KafkaIndexTask task1 = createTask(
        null,
        new KafkaIOConfig("sequence0",
            new KafkaPartitions("topic0", ImmutableMap.of(0, 2L)),
            new KafkaPartitions("topic0", ImmutableMap.of(0, 5L)),
            kafkaServer.consumerProperties(), true, false, null),
        null, null);
    final KafkaIndexTask task2 = createTask(
        null,
        new KafkaIOConfig("sequence0",
            new KafkaPartitions("topic0", ImmutableMap.of(0, 2L)),
            new KafkaPartitions("topic0", ImmutableMap.of(0, 5L)),
            kafkaServer.consumerProperties(), true, false, null),
        null, null);
    final ListenableFuture<TaskStatus> future1 = runTask(task1);
    final ListenableFuture<TaskStatus> future2 = runTask(task2);
    // Insert data
    try (final KafkaProducer<byte[], byte[]> kafkaProducer = kafkaServer.newProducer()) {
        for (ProducerRecord<byte[], byte[]> record : RECORDS) {
            kafkaProducer.send(record).get();
        }
    }
    // Wait for tasks to exit
    Assert.assertEquals(TaskStatus.Status.SUCCESS, future1.get().getStatusCode());
    Assert.assertEquals(TaskStatus.Status.SUCCESS, future2.get().getStatusCode());
    // Check metrics
    Assert.assertEquals(3, task1.getFireDepartmentMetrics().processed());
    Assert.assertEquals(0, task1.getFireDepartmentMetrics().unparseable());
    Assert.assertEquals(0, task1.getFireDepartmentMetrics().thrownAway());
    Assert.assertEquals(3, task2.getFireDepartmentMetrics().processed());
    Assert.assertEquals(0, task2.getFireDepartmentMetrics().unparseable());
    Assert.assertEquals(0, task2.getFireDepartmentMetrics().thrownAway());
    // Check published segments & metadata
    SegmentDescriptor desc1 = SD(task1, "2010/P1D", 0);
    SegmentDescriptor desc2 = SD(task1, "2011/P1D", 0);
    Assert.assertEquals(ImmutableSet.of(desc1, desc2), publishedDescriptors());
    Assert.assertEquals(
        new KafkaDataSourceMetadata(new KafkaPartitions("topic0", ImmutableMap.of(0, 5L))),
        metadataStorageCoordinator.getDataSourceMetadata(DATA_SCHEMA.getDataSource()));
    // Check segments in deep storage
    Assert.assertEquals(ImmutableList.of("c"), readSegmentDim1(desc1));
    Assert.assertEquals(ImmutableList.of("d", "e"), readSegmentDim1(desc2));
}
Also used : SegmentDescriptor (io.druid.query.SegmentDescriptor), TaskStatus (io.druid.indexing.common.TaskStatus), Test (org.junit.Test)
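
Here both replicas share the sequence name "sequence0" and the same offset range, and with the first boolean flag (presumably useTransaction) set to true, the second publish is treated as a duplicate, so the segment set matches a single-task run. Since either replica may finish first, waiting on both futures together before asserting keeps the checks order-independent; a small variant of the wait step using Guava's Futures.allAsList:

import com.google.common.util.concurrent.Futures;
import io.druid.indexing.common.TaskStatus;
import java.util.List;
import org.junit.Assert;

// Block until both replicas exit, then assert success in one pass.
List<TaskStatus> statuses = Futures.allAsList(future1, future2).get();
for (TaskStatus status : statuses) {
    Assert.assertEquals(TaskStatus.Status.SUCCESS, status.getStatusCode());
}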

Example 5 with TaskStatus

Use of io.druid.indexing.common.TaskStatus in project druid by druid-io.

The class KafkaIndexTaskTest, method runTask.

private ListenableFuture<TaskStatus> runTask(final Task task) {
    try {
        taskStorage.insert(task, TaskStatus.running(task.getId()));
    } catch (EntryExistsException e) {
        // suppress: the task entry is already present in storage
    }
    taskLockbox.syncFromStorage();
    final TaskToolbox toolbox = toolboxFactory.build(task);
    synchronized (runningTasks) {
        runningTasks.add(task);
    }
    return taskExec.submit(new Callable<TaskStatus>() {

        @Override
        public TaskStatus call() throws Exception {
            try {
                if (task.isReady(toolbox.getTaskActionClient())) {
                    return task.run(toolbox);
                } else {
                    throw new ISE("Task is not ready");
                }
            } catch (Exception e) {
                log.warn(e, "Task failed");
                return TaskStatus.failure(task.getId());
            }
        }
    });
}
Also used : TaskToolbox (io.druid.indexing.common.TaskToolbox), EntryExistsException (io.druid.metadata.EntryExistsException), ISE (io.druid.java.util.common.ISE), TaskStatus (io.druid.indexing.common.TaskStatus), IOException (java.io.IOException), TimeoutException (java.util.concurrent.TimeoutException)
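
runTask is the shared harness behind the tests above: it records the task as running in storage, syncs the lockbox, and hands the actual run to an executor. For submit() to return the ListenableFuture<TaskStatus> the tests block on, taskExec is presumably a Guava ListeningExecutorService; a plausible construction (the real field is initialized elsewhere in the test class):

import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Executors;

// Wrap a plain thread pool so that submit() returns ListenableFuture instances.
ListeningExecutorService taskExec =
    MoreExecutors.listeningDecorator(Executors.newCachedThreadPool());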

Aggregations

TaskStatus (io.druid.indexing.common.TaskStatus): 49 uses
Test (org.junit.Test): 37 uses
SegmentDescriptor (io.druid.query.SegmentDescriptor): 17 uses
Task (io.druid.indexing.common.task.Task): 16 uses
TaskToolbox (io.druid.indexing.common.TaskToolbox): 13 uses
RealtimeIndexTask (io.druid.indexing.common.task.RealtimeIndexTask): 12 uses
DataSegment (io.druid.timeline.DataSegment): 12 uses
AbstractFixedIntervalTask (io.druid.indexing.common.task.AbstractFixedIntervalTask): 11 uses
IndexTask (io.druid.indexing.common.task.IndexTask): 11 uses
KillTask (io.druid.indexing.common.task.KillTask): 11 uses
FireDepartmentTest (io.druid.segment.realtime.FireDepartmentTest): 10 uses
TestIndexerMetadataStorageCoordinator (io.druid.indexing.test.TestIndexerMetadataStorageCoordinator): 8 uses
File (java.io.File): 8 uses
Interval (org.joda.time.Interval): 8 uses
MapBasedInputRow (io.druid.data.input.MapBasedInputRow): 7 uses
IOException (java.io.IOException): 7 uses
ISE (io.druid.java.util.common.ISE): 6 uses
Pair (io.druid.java.util.common.Pair): 6 uses
ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 4 uses
Map (java.util.Map): 4 uses