Example 56 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From the class JettyQosTest, method testQoS:

@Test(timeout = 60_000L)
public void testQoS() throws Exception {
    final int fastThreads = 20;
    final int slowThreads = 15;
    final int slowRequestsPerThread = 5;
    final int fastRequestsPerThread = 200;
    final HttpClient fastClient = new ClientHolder(fastThreads).getClient();
    final HttpClient slowClient = new ClientHolder(slowThreads).getClient();
    final ExecutorService fastPool = Execs.multiThreaded(fastThreads, "fast-%d");
    final ExecutorService slowPool = Execs.multiThreaded(slowThreads, "slow-%d");
    final CountDownLatch latch = new CountDownLatch(fastThreads * fastRequestsPerThread);
    final AtomicLong fastCount = new AtomicLong();
    final AtomicLong slowCount = new AtomicLong();
    final AtomicLong fastElapsed = new AtomicLong();
    final AtomicLong slowElapsed = new AtomicLong();
    for (int i = 0; i < slowThreads; i++) {
        slowPool.submit(new Runnable() {

            @Override
            public void run() {
                for (int i = 0; i < slowRequestsPerThread; i++) {
                    long startTime = System.currentTimeMillis();
                    try {
                        ListenableFuture<StatusResponseHolder> go = slowClient.go(new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/slow/hello")), new StatusResponseHandler(Charset.defaultCharset()));
                        go.get();
                        slowCount.incrementAndGet();
                        slowElapsed.addAndGet(System.currentTimeMillis() - startTime);
                    } catch (InterruptedException e) {
                    // BE COOL
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw Throwables.propagate(e);
                    }
                }
            }
        });
    }
    // wait for jetty server pool to completely fill up
    while (server.getThreadPool().getIdleThreads() != 0) {
        Thread.sleep(25);
    }
    for (int i = 0; i < fastThreads; i++) {
        fastPool.submit(new Runnable() {

            @Override
            public void run() {
                for (int i = 0; i < fastRequestsPerThread; i++) {
                    long startTime = System.currentTimeMillis();
                    try {
                        ListenableFuture<StatusResponseHolder> go = fastClient.go(new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/default")), new StatusResponseHandler(Charset.defaultCharset()));
                        go.get();
                        fastCount.incrementAndGet();
                        fastElapsed.addAndGet(System.currentTimeMillis() - startTime);
                        latch.countDown();
                    } catch (InterruptedException e) {
                    // BE COOL
                    } catch (Exception e) {
                        e.printStackTrace();
                        throw Throwables.propagate(e);
                    }
                }
            }
        });
    }
    // Wait for all fast requests to be served
    latch.await();
    slowPool.shutdownNow();
    fastPool.shutdown();
    // check that fast requests finished quickly
    Assert.assertTrue(fastElapsed.get() / fastCount.get() < 500);
}
Also used: Request (com.metamx.http.client.Request), CountDownLatch (java.util.concurrent.CountDownLatch), URL (java.net.URL), AtomicLong (java.util.concurrent.atomic.AtomicLong), HttpClient (com.metamx.http.client.HttpClient), ExecutorService (java.util.concurrent.ExecutorService), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), StatusResponseHandler (com.metamx.http.client.response.StatusResponseHandler), Test (org.junit.Test)
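
The test drives every ListenableFuture synchronously with get(). For contrast, the same request can be issued without blocking the submitting thread via Guava's Futures.addCallback. The sketch below is not part of the original test: it assumes the test's fastClient, port, fastCount, and latch are reachable as fields, and it uses the two-argument addCallback overload available in the Guava versions Druid used at the time.

private void fireAndCount() throws Exception {
    ListenableFuture<StatusResponseHolder> go = fastClient.go(
        new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/default")),
        new StatusResponseHandler(Charset.defaultCharset()));
    Futures.addCallback(go, new FutureCallback<StatusResponseHolder>() {

        @Override
        public void onSuccess(StatusResponseHolder holder) {
            fastCount.incrementAndGet();
            latch.countDown();
        }

        @Override
        public void onFailure(Throwable t) {
            // count down even on failure so latch.await() cannot hang
            latch.countDown();
        }
    });
}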

Example 57 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From the class KafkaSupervisor, method generateReport:

private KafkaSupervisorReport generateReport(boolean includeOffsets) {
    int numPartitions = 0;
    for (Map<Integer, Long> partitionGroup : partitionGroups.values()) {
        numPartitions += partitionGroup.size();
    }
    KafkaSupervisorReport report = new KafkaSupervisorReport(dataSource, DateTime.now(), ioConfig.getTopic(), numPartitions, ioConfig.getReplicas(), ioConfig.getTaskDuration().getMillis() / 1000);
    List<TaskReportData> taskReports = Lists.newArrayList();
    List<ListenableFuture<Map<Integer, Long>>> futures = Lists.newArrayList();
    try {
        for (TaskGroup taskGroup : taskGroups.values()) {
            for (Map.Entry<String, TaskData> entry : taskGroup.tasks.entrySet()) {
                String taskId = entry.getKey();
                DateTime startTime = entry.getValue().startTime;
                Long remainingSeconds = null;
                if (startTime != null) {
                    remainingSeconds = Math.max(0, ioConfig.getTaskDuration().getMillis() - (DateTime.now().getMillis() - startTime.getMillis())) / 1000;
                }
                taskReports.add(new TaskReportData(taskId, (includeOffsets ? taskGroup.partitionOffsets : null), null, startTime, remainingSeconds, TaskReportData.TaskType.ACTIVE));
                if (includeOffsets) {
                    futures.add(taskClient.getCurrentOffsetsAsync(taskId, false));
                }
            }
        }
        for (List<TaskGroup> taskGroups : pendingCompletionTaskGroups.values()) {
            for (TaskGroup taskGroup : taskGroups) {
                for (Map.Entry<String, TaskData> entry : taskGroup.tasks.entrySet()) {
                    String taskId = entry.getKey();
                    DateTime startTime = entry.getValue().startTime;
                    Long remainingSeconds = null;
                    if (taskGroup.completionTimeout != null) {
                        remainingSeconds = Math.max(0, taskGroup.completionTimeout.getMillis() - DateTime.now().getMillis()) / 1000;
                    }
                    taskReports.add(new TaskReportData(taskId, (includeOffsets ? taskGroup.partitionOffsets : null), null, startTime, remainingSeconds, TaskReportData.TaskType.PUBLISHING));
                    if (includeOffsets) {
                        futures.add(taskClient.getCurrentOffsetsAsync(taskId, false));
                    }
                }
            }
        }
        List<Map<Integer, Long>> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
        for (int i = 0; i < taskReports.size(); i++) {
            TaskReportData reportData = taskReports.get(i);
            if (includeOffsets) {
                reportData.setCurrentOffsets(results.get(i));
            }
            report.addTask(reportData);
        }
    } catch (Exception e) {
        log.warn(e, "Failed to generate status report");
    }
    return report;
}
Also used: DateTime (org.joda.time.DateTime), TimeoutException (java.util.concurrent.TimeoutException), EntryExistsException (io.druid.metadata.EntryExistsException), JsonProcessingException (com.fasterxml.jackson.core.JsonProcessingException), IOException (java.io.IOException), ExecutionException (java.util.concurrent.ExecutionException), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap)
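
generateReport leans on a contract of Futures.successfulAsList: the result list preserves the order of the input futures and substitutes null for any future that failed, which is what keeps results.get(i) aligned with taskReports.get(i) even when an offsets call fails. A minimal sketch of that contract, with illustrative names that are not from the Druid code:

ListeningExecutorService exec =
    MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
ListenableFuture<Integer> ok = exec.submit(new Callable<Integer>() {

    @Override
    public Integer call() {
        return 1;
    }
});
ListenableFuture<Integer> boom = exec.submit(new Callable<Integer>() {

    @Override
    public Integer call() {
        throw new RuntimeException("offsets unavailable");
    }
});
// Order is preserved and the failure becomes a null entry rather than
// failing the combined future, so a partial report can still be built.
List<Integer> results = Futures.successfulAsList(ImmutableList.of(ok, boom)).get();
// results -> [1, null]
exec.shutdown();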

Example 58 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From the class KafkaSupervisor, method discoverTasks:

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();
    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }
        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();
        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.
        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet().iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);
        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId) && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId), new Function<KafkaIndexTask.Status, Boolean>() {

                    @Override
                    public Boolean apply(KafkaIndexTask.Status status) {
                        if (status == KafkaIndexTask.Status.PUBLISHING) {
                            addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId, kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap());
                            // update partitionGroups with the publishing task's offsets (if they are greater than what is
                            // existing) so that the next tasks will start reading from where this task left off
                            Map<Integer, Long> publishingTaskCurrentOffsets = taskClient.getCurrentOffsets(taskId, true);
                            for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets.entrySet()) {
                                Integer partition = entry.getKey();
                                Long offset = entry.getValue();
                                ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups.get(getTaskGroupIdForPartition(partition));
                                boolean succeeded;
                                do {
                                    succeeded = true;
                                    Long previousOffset = partitionOffsets.putIfAbsent(partition, offset);
                                    if (previousOffset != null && previousOffset < offset) {
                                        succeeded = partitionOffsets.replace(partition, previousOffset, offset);
                                    }
                                } while (!succeeded);
                            }
                        } else {
                            for (Integer partition : kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()) {
                                if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                    log.warn("Stopping task [%s] which does not match the expected partition allocation", taskId);
                                    try {
                                        stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                                    } catch (InterruptedException | ExecutionException | TimeoutException e) {
                                        log.warn(e, "Exception while stopping task");
                                    }
                                    return false;
                                }
                            }
                            if (taskGroups.putIfAbsent(taskGroupId, new TaskGroup(ImmutableMap.copyOf(kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()), kafkaTask.getIOConfig().getMinimumMessageTime())) == null) {
                                log.debug("Created new task group [%d]", taskGroupId);
                            }
                            if (!isTaskCurrent(taskGroupId, taskId)) {
                                log.info("Stopping task [%s] which does not match the expected parameters and ingestion spec", taskId);
                                try {
                                    stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                                } catch (InterruptedException | ExecutionException | TimeoutException e) {
                                    log.warn(e, "Exception while stopping task");
                                }
                                return false;
                            } else {
                                taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
                            }
                        }
                        return true;
                    }
                }, workerExec));
            }
        }
    }
    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
Also used: Task (io.druid.indexing.common.task.Task), KafkaIndexTask (io.druid.indexing.kafka.KafkaIndexTask), Function (com.google.common.base.Function), ExecutionException (java.util.concurrent.ExecutionException), TimeoutException (java.util.concurrent.TimeoutException), TaskStatus (io.druid.indexing.common.TaskStatus), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap)
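
The backbone of discoverTasks is Futures.transform(future, function, executor): getStatusAsync stays non-blocking, the reconciliation Function runs on workerExec rather than on the HTTP callback thread, and the derived Boolean futures are collected afterwards with successfulAsList, where a thrown exception surfaces as a null result and gets the task killed. A stripped-down sketch of that shape; String stands in for KafkaIndexTask.Status and all names here are placeholders:

ListeningExecutorService workerExec =
    MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());
ListenableFuture<String> statusFuture = workerExec.submit(new Callable<String>() {

    @Override
    public String call() {
        // stand-in for taskClient.getStatusAsync(taskId)
        return "PUBLISHING";
    }
});
ListenableFuture<Boolean> handled = Futures.transform(
    statusFuture,
    new Function<String, Boolean>() {

        @Override
        public Boolean apply(String status) {
            // Runs on workerExec once statusFuture completes; throwing here
            // fails the derived future, which successfulAsList later reports
            // as a null entry.
            return "PUBLISHING".equals(status);
        }
    },
    workerExec);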

Example 59 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From the class KafkaSupervisor, method checkTaskDuration:

private void checkTaskDuration() throws InterruptedException, ExecutionException, TimeoutException {
    final List<ListenableFuture<Map<Integer, Long>>> futures = Lists.newArrayList();
    final List<Integer> futureGroupIds = Lists.newArrayList();
    for (Map.Entry<Integer, TaskGroup> entry : taskGroups.entrySet()) {
        Integer groupId = entry.getKey();
        TaskGroup group = entry.getValue();
        // find the longest running task from this group
        DateTime earliestTaskStart = DateTime.now();
        for (TaskData taskData : group.tasks.values()) {
            if (earliestTaskStart.isAfter(taskData.startTime)) {
                earliestTaskStart = taskData.startTime;
            }
        }
        // if this task has run longer than the configured duration, signal all tasks in the group to persist
        if (earliestTaskStart.plus(ioConfig.getTaskDuration()).isBeforeNow()) {
            log.info("Task group [%d] has run for [%s]", groupId, ioConfig.getTaskDuration());
            futureGroupIds.add(groupId);
            futures.add(signalTasksToFinish(groupId));
        }
    }
    List<Map<Integer, Long>> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int j = 0; j < results.size(); j++) {
        Integer groupId = futureGroupIds.get(j);
        TaskGroup group = taskGroups.get(groupId);
        Map<Integer, Long> endOffsets = results.get(j);
        if (endOffsets != null) {
            // set a timeout and put this group in pendingCompletionTaskGroups so that it can be monitored for completion
            group.completionTimeout = DateTime.now().plus(ioConfig.getCompletionTimeout());
            pendingCompletionTaskGroups.putIfAbsent(groupId, Lists.<TaskGroup>newCopyOnWriteArrayList());
            pendingCompletionTaskGroups.get(groupId).add(group);
            // set endOffsets as the next startOffsets
            for (Map.Entry<Integer, Long> entry : endOffsets.entrySet()) {
                partitionGroups.get(groupId).put(entry.getKey(), entry.getValue());
            }
        } else {
            log.warn("All tasks in group [%s] failed to transition to publishing state, killing tasks [%s]", groupId, group.taskIds());
            for (String id : group.taskIds()) {
                killTask(id);
            }
        }
        // remove this task group from the list of current task groups now that it has been handled
        taskGroups.remove(groupId);
    }
}
Also used: DateTime (org.joda.time.DateTime), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), Map (java.util.Map), ImmutableMap (com.google.common.collect.ImmutableMap), ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap)
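
Note the bounded wait: get(futureTimeoutInSeconds, TimeUnit.SECONDS) keeps an unresponsive task group from stalling the supervisor indefinitely, and the resulting TimeoutException simply propagates through checkTaskDuration's throws clause. A toy illustration of that timeout path, using SettableFuture to simulate a task that never answers (the five-second bound is arbitrary):

SettableFuture<Map<Integer, Long>> unresponsive = SettableFuture.create();
try {
    Futures.successfulAsList(ImmutableList.of(unresponsive)).get(5, TimeUnit.SECONDS);
} catch (TimeoutException e) {
    // without the bound, this get() would block forever
}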

Example 60 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From the class KafkaIndexTaskClientTest, method testGetStatusAsync:

@Test
public void testGetStatusAsync() throws Exception {
    final int numRequests = TEST_IDS.size();
    Capture<Request> captured = Capture.newInstance(CaptureType.ALL);
    expect(responseHolder.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
    expect(responseHolder.getContent()).andReturn("\"READING\"").anyTimes();
    expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT))).andReturn(Futures.immediateFuture(responseHolder)).times(numRequests);
    replayAll();
    List<URL> expectedUrls = Lists.newArrayList();
    List<ListenableFuture<KafkaIndexTask.Status>> futures = Lists.newArrayList();
    for (int i = 0; i < numRequests; i++) {
        expectedUrls.add(new URL(String.format(URL_FORMATTER, TEST_HOST, TEST_PORT, TEST_IDS.get(i), "status")));
        futures.add(client.getStatusAsync(TEST_IDS.get(i)));
    }
    List<KafkaIndexTask.Status> responses = Futures.allAsList(futures).get();
    verifyAll();
    List<Request> requests = captured.getValues();
    Assert.assertEquals(numRequests, requests.size());
    Assert.assertEquals(numRequests, responses.size());
    for (int i = 0; i < numRequests; i++) {
        Assert.assertEquals(HttpMethod.GET, requests.get(i).getMethod());
        Assert.assertTrue("unexpectedURL", expectedUrls.contains(requests.get(i).getUrl()));
        Assert.assertEquals(KafkaIndexTask.Status.READING, responses.get(i));
    }
}
Also used: HttpResponseStatus (org.jboss.netty.handler.codec.http.HttpResponseStatus), TaskStatus (io.druid.indexing.common.TaskStatus), Request (com.metamx.http.client.Request), ListenableFuture (com.google.common.util.concurrent.ListenableFuture), URL (java.net.URL), Test (org.junit.Test)
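
Unlike the supervisor code above, this test aggregates with Futures.allAsList, which fails as a whole if any input future fails; that is the right behavior for a test, where a single failed request should fail the run. A small sketch of the difference between the two combinators, using illustrative values:

SettableFuture<String> a = SettableFuture.create();
SettableFuture<String> b = SettableFuture.create();
a.set("ok");
b.setException(new RuntimeException("boom"));
// allAsList: one failure fails the combined future.
try {
    Futures.allAsList(ImmutableList.of(a, b)).get();
} catch (ExecutionException expected) {
    // a test would surface this and fail, as intended
}
// successfulAsList: failures become null entries, order preserved.
List<String> lenient = Futures.successfulAsList(ImmutableList.of(a, b)).get();
// lenient -> ["ok", null]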

Aggregations

ListenableFuture (com.google.common.util.concurrent.ListenableFuture) 192
Test (org.junit.Test) 78
ArrayList (java.util.ArrayList) 63
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService) 53
List (java.util.List) 50
ExecutionException (java.util.concurrent.ExecutionException) 42
Map (java.util.Map) 36
IOException (java.io.IOException) 35
CountDownLatch (java.util.concurrent.CountDownLatch) 26
File (java.io.File) 23
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean) 23
ImmutableList (com.google.common.collect.ImmutableList) 20
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 20
Futures (com.google.common.util.concurrent.Futures) 19
Callable (java.util.concurrent.Callable) 19
ImmutableMap (com.google.common.collect.ImmutableMap) 18
HashMap (java.util.HashMap) 16
Lists (com.google.common.collect.Lists) 14
URL (java.net.URL) 14
Set (java.util.Set) 14