Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
Example from the class JettyQosTest, method testQoS:
@Test(timeout = 60_000L)
public void testQoS() throws Exception {
  final int fastThreads = 20;
  final int slowThreads = 15;
  final int slowRequestsPerThread = 5;
  final int fastRequestsPerThread = 200;
  final HttpClient fastClient = new ClientHolder(fastThreads).getClient();
  final HttpClient slowClient = new ClientHolder(slowThreads).getClient();
  final ExecutorService fastPool = Execs.multiThreaded(fastThreads, "fast-%d");
  final ExecutorService slowPool = Execs.multiThreaded(slowThreads, "slow-%d");
  final CountDownLatch latch = new CountDownLatch(fastThreads * fastRequestsPerThread);
  final AtomicLong fastCount = new AtomicLong();
  final AtomicLong slowCount = new AtomicLong();
  final AtomicLong fastElapsed = new AtomicLong();
  final AtomicLong slowElapsed = new AtomicLong();
  for (int i = 0; i < slowThreads; i++) {
    slowPool.submit(new Runnable() {
      @Override
      public void run() {
        for (int i = 0; i < slowRequestsPerThread; i++) {
          long startTime = System.currentTimeMillis();
          try {
            ListenableFuture<StatusResponseHolder> go = slowClient.go(
                new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/slow/hello")),
                new StatusResponseHandler(Charset.defaultCharset())
            );
            go.get();
            slowCount.incrementAndGet();
            slowElapsed.addAndGet(System.currentTimeMillis() - startTime);
          } catch (InterruptedException e) {
            // BE COOL
          } catch (Exception e) {
            e.printStackTrace();
            throw Throwables.propagate(e);
          }
        }
      }
    });
  }
  // wait for jetty server pool to completely fill up
  while (server.getThreadPool().getIdleThreads() != 0) {
    Thread.sleep(25);
  }
  for (int i = 0; i < fastThreads; i++) {
    fastPool.submit(new Runnable() {
      @Override
      public void run() {
        for (int i = 0; i < fastRequestsPerThread; i++) {
          long startTime = System.currentTimeMillis();
          try {
            ListenableFuture<StatusResponseHolder> go = fastClient.go(
                new Request(HttpMethod.GET, new URL("http://localhost:" + port + "/default")),
                new StatusResponseHandler(Charset.defaultCharset())
            );
            go.get();
            fastCount.incrementAndGet();
            fastElapsed.addAndGet(System.currentTimeMillis() - startTime);
            latch.countDown();
          } catch (InterruptedException e) {
            // BE COOL
          } catch (Exception e) {
            e.printStackTrace();
            throw Throwables.propagate(e);
          }
        }
      }
    });
  }
  // Wait for all fast requests to be served
  latch.await();
  slowPool.shutdownNow();
  fastPool.shutdown();
  // check that fast requests finished quickly
  Assert.assertTrue(fastElapsed.get() / fastCount.get() < 500);
}
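The test above waits on each ListenableFuture by calling get() on the submitting thread. For comparison, here is a minimal self-contained sketch (not taken from the Druid test; the executor, task, and class name are placeholders chosen for illustration) of the non-blocking style Guava supports through Futures.addCallback:

import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class ListenableFutureCallbackSketch {
  public static void main(String[] args) {
    // Wrap a plain executor so that submitted tasks return ListenableFuture.
    ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));

    ListenableFuture<Long> future = exec.submit(new Callable<Long>() {
      @Override
      public Long call() {
        return System.currentTimeMillis(); // stand-in for an HTTP request
      }
    });

    // Handle completion without blocking, instead of calling future.get() as the test does.
    Futures.addCallback(future, new FutureCallback<Long>() {
      @Override
      public void onSuccess(Long finishedAt) {
        System.out.println("finished at " + finishedAt);
      }

      @Override
      public void onFailure(Throwable t) {
        t.printStackTrace();
      }
    });

    exec.shutdown();
  }
}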
Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
Example from the class KafkaSupervisor, method generateReport:
private KafkaSupervisorReport generateReport(boolean includeOffsets) {
  int numPartitions = 0;
  for (Map<Integer, Long> partitionGroup : partitionGroups.values()) {
    numPartitions += partitionGroup.size();
  }
  KafkaSupervisorReport report = new KafkaSupervisorReport(
      dataSource,
      DateTime.now(),
      ioConfig.getTopic(),
      numPartitions,
      ioConfig.getReplicas(),
      ioConfig.getTaskDuration().getMillis() / 1000
  );
  List<TaskReportData> taskReports = Lists.newArrayList();
  List<ListenableFuture<Map<Integer, Long>>> futures = Lists.newArrayList();
  try {
    for (TaskGroup taskGroup : taskGroups.values()) {
      for (Map.Entry<String, TaskData> entry : taskGroup.tasks.entrySet()) {
        String taskId = entry.getKey();
        DateTime startTime = entry.getValue().startTime;
        Long remainingSeconds = null;
        if (startTime != null) {
          remainingSeconds = Math.max(
              0,
              ioConfig.getTaskDuration().getMillis() - (DateTime.now().getMillis() - startTime.getMillis())
          ) / 1000;
        }
        taskReports.add(
            new TaskReportData(
                taskId,
                (includeOffsets ? taskGroup.partitionOffsets : null),
                null,
                startTime,
                remainingSeconds,
                TaskReportData.TaskType.ACTIVE
            )
        );
        if (includeOffsets) {
          futures.add(taskClient.getCurrentOffsetsAsync(taskId, false));
        }
      }
    }
    for (List<TaskGroup> taskGroups : pendingCompletionTaskGroups.values()) {
      for (TaskGroup taskGroup : taskGroups) {
        for (Map.Entry<String, TaskData> entry : taskGroup.tasks.entrySet()) {
          String taskId = entry.getKey();
          DateTime startTime = entry.getValue().startTime;
          Long remainingSeconds = null;
          if (taskGroup.completionTimeout != null) {
            remainingSeconds = Math.max(0, taskGroup.completionTimeout.getMillis() - DateTime.now().getMillis()) / 1000;
          }
          taskReports.add(
              new TaskReportData(
                  taskId,
                  (includeOffsets ? taskGroup.partitionOffsets : null),
                  null,
                  startTime,
                  remainingSeconds,
                  TaskReportData.TaskType.PUBLISHING
              )
          );
          if (includeOffsets) {
            futures.add(taskClient.getCurrentOffsetsAsync(taskId, false));
          }
        }
      }
    }
    List<Map<Integer, Long>> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < taskReports.size(); i++) {
      TaskReportData reportData = taskReports.get(i);
      if (includeOffsets) {
        reportData.setCurrentOffsets(results.get(i));
      }
      report.addTask(reportData);
    }
  } catch (Exception e) {
    log.warn(e, "Failed to generate status report");
  }
  return report;
}
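generateReport() gathers the per-task offset futures with Futures.successfulAsList and then reads the results back by position. The property it relies on is sketched below in a standalone snippet that assumes nothing beyond stock Guava: the combined future never fails, any input that failed shows up as null, and the output order matches the input order.

import java.util.List;
import java.util.concurrent.TimeUnit;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class SuccessfulAsListSketch {
  public static void main(String[] args) throws Exception {
    List<ListenableFuture<Integer>> futures = ImmutableList.of(
        Futures.immediateFuture(1),
        Futures.<Integer>immediateFailedFuture(new RuntimeException("boom")),
        Futures.immediateFuture(3)
    );

    // successfulAsList never fails the combined future; failed inputs become null,
    // and the result order matches the input order, so positional lookups are safe.
    List<Integer> results = Futures.successfulAsList(futures).get(5, TimeUnit.SECONDS);
    System.out.println(results); // [1, null, 3]
  }
}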
Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
Example from the class KafkaSupervisor, method discoverTasks:
private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
  int taskCount = 0;
  List<String> futureTaskIds = Lists.newArrayList();
  List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
  List<Task> tasks = taskStorage.getActiveTasks();
  for (Task task : tasks) {
    if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
      continue;
    }
    taskCount++;
    final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
    final String taskId = task.getId();
    // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
    // later determine that this task is actively reading, we will make sure that it matches our current partition
    // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
    // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
    // state, we will permit it to complete even if it doesn't match our current partition allocation to support
    // seamless schema migration.
    Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet().iterator();
    final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);
    if (taskGroupId != null) {
      // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
      // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
      TaskGroup taskGroup = taskGroups.get(taskGroupId);
      if (!isTaskInPendingCompletionGroups(taskId) && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
        futureTaskIds.add(taskId);
        futures.add(
            Futures.transform(
                taskClient.getStatusAsync(taskId),
                new Function<KafkaIndexTask.Status, Boolean>() {
                  @Override
                  public Boolean apply(KafkaIndexTask.Status status) {
                    if (status == KafkaIndexTask.Status.PUBLISHING) {
                      addDiscoveredTaskToPendingCompletionTaskGroups(
                          taskGroupId,
                          taskId,
                          kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()
                      );
                      // update partitionGroups with the publishing task's offsets (if they are greater than what is
                      // existing) so that the next tasks will start reading from where this task left off
                      Map<Integer, Long> publishingTaskCurrentOffsets = taskClient.getCurrentOffsets(taskId, true);
                      for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets.entrySet()) {
                        Integer partition = entry.getKey();
                        Long offset = entry.getValue();
                        ConcurrentHashMap<Integer, Long> partitionOffsets =
                            partitionGroups.get(getTaskGroupIdForPartition(partition));
                        boolean succeeded;
                        do {
                          succeeded = true;
                          Long previousOffset = partitionOffsets.putIfAbsent(partition, offset);
                          if (previousOffset != null && previousOffset < offset) {
                            succeeded = partitionOffsets.replace(partition, previousOffset, offset);
                          }
                        } while (!succeeded);
                      }
                    } else {
                      for (Integer partition : kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()) {
                        if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                          log.warn("Stopping task [%s] which does not match the expected partition allocation", taskId);
                          try {
                            stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                          } catch (InterruptedException | ExecutionException | TimeoutException e) {
                            log.warn(e, "Exception while stopping task");
                          }
                          return false;
                        }
                      }
                      if (taskGroups.putIfAbsent(
                          taskGroupId,
                          new TaskGroup(
                              ImmutableMap.copyOf(kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()),
                              kafkaTask.getIOConfig().getMinimumMessageTime()
                          )
                      ) == null) {
                        log.debug("Created new task group [%d]", taskGroupId);
                      }
                      if (!isTaskCurrent(taskGroupId, taskId)) {
                        log.info("Stopping task [%s] which does not match the expected parameters and ingestion spec", taskId);
                        try {
                          stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                        } catch (InterruptedException | ExecutionException | TimeoutException e) {
                          log.warn(e, "Exception while stopping task");
                        }
                        return false;
                      } else {
                        taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
                      }
                    }
                    return true;
                  }
                },
                workerExec
            )
        );
      }
    }
  }
  List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
  for (int i = 0; i < results.size(); i++) {
    if (results.get(i) == null) {
      String taskId = futureTaskIds.get(i);
      log.warn("Task [%s] failed to return status, killing task", taskId);
      killTask(taskId);
    }
  }
  log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
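discoverTasks() turns each status future into a Boolean future with Futures.transform, running the Function on workerExec, and later treats a null entry from successfulAsList as a task that never reported its status. Below is a small self-contained sketch of that transform pattern (class name and values are illustrative; an immediate future stands in for taskClient.getStatusAsync()):

import java.util.concurrent.Executors;
import com.google.common.base.Function;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

public class TransformSketch {
  public static void main(String[] args) throws Exception {
    ListeningExecutorService workerExec =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    ListenableFuture<String> statusFuture = Futures.immediateFuture("PUBLISHING");

    // Derive a Boolean future from the status future; the Function runs on workerExec
    // once the input completes, mirroring the discoverTasks() pattern above.
    ListenableFuture<Boolean> isPublishing = Futures.transform(
        statusFuture,
        new Function<String, Boolean>() {
          @Override
          public Boolean apply(String status) {
            return "PUBLISHING".equals(status);
          }
        },
        workerExec
    );

    System.out.println(isPublishing.get()); // true
    workerExec.shutdown();
  }
}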
Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
Example from the class KafkaSupervisor, method checkTaskDuration:
private void checkTaskDuration() throws InterruptedException, ExecutionException, TimeoutException {
  final List<ListenableFuture<Map<Integer, Long>>> futures = Lists.newArrayList();
  final List<Integer> futureGroupIds = Lists.newArrayList();
  for (Map.Entry<Integer, TaskGroup> entry : taskGroups.entrySet()) {
    Integer groupId = entry.getKey();
    TaskGroup group = entry.getValue();
    // find the longest running task from this group
    DateTime earliestTaskStart = DateTime.now();
    for (TaskData taskData : group.tasks.values()) {
      if (earliestTaskStart.isAfter(taskData.startTime)) {
        earliestTaskStart = taskData.startTime;
      }
    }
    // if this task has run longer than the configured duration, signal all tasks in the group to persist
    if (earliestTaskStart.plus(ioConfig.getTaskDuration()).isBeforeNow()) {
      log.info("Task group [%d] has run for [%s]", groupId, ioConfig.getTaskDuration());
      futureGroupIds.add(groupId);
      futures.add(signalTasksToFinish(groupId));
    }
  }
  List<Map<Integer, Long>> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
  for (int j = 0; j < results.size(); j++) {
    Integer groupId = futureGroupIds.get(j);
    TaskGroup group = taskGroups.get(groupId);
    Map<Integer, Long> endOffsets = results.get(j);
    if (endOffsets != null) {
      // set a timeout and put this group in pendingCompletionTaskGroups so that it can be monitored for completion
      group.completionTimeout = DateTime.now().plus(ioConfig.getCompletionTimeout());
      pendingCompletionTaskGroups.putIfAbsent(groupId, Lists.<TaskGroup>newCopyOnWriteArrayList());
      pendingCompletionTaskGroups.get(groupId).add(group);
      // set endOffsets as the next startOffsets
      for (Map.Entry<Integer, Long> entry : endOffsets.entrySet()) {
        partitionGroups.get(groupId).put(entry.getKey(), entry.getValue());
      }
    } else {
      log.warn(
          "All tasks in group [%s] failed to transition to publishing state, killing tasks [%s]",
          groupId,
          group.taskIds()
      );
      for (String id : group.taskIds()) {
        killTask(id);
      }
    }
    // remove this task group from the list of current task groups now that it has been handled
    taskGroups.remove(groupId);
  }
}
Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.
Example from the class KafkaIndexTaskClientTest, method testGetStatusAsync:
@Test
public void testGetStatusAsync() throws Exception {
  final int numRequests = TEST_IDS.size();
  Capture<Request> captured = Capture.newInstance(CaptureType.ALL);
  expect(responseHolder.getStatus()).andReturn(HttpResponseStatus.OK).anyTimes();
  expect(responseHolder.getContent()).andReturn("\"READING\"").anyTimes();
  expect(httpClient.go(capture(captured), anyObject(FullResponseHandler.class), eq(TEST_HTTP_TIMEOUT)))
      .andReturn(Futures.immediateFuture(responseHolder))
      .times(numRequests);
  replayAll();
  List<URL> expectedUrls = Lists.newArrayList();
  List<ListenableFuture<KafkaIndexTask.Status>> futures = Lists.newArrayList();
  for (int i = 0; i < numRequests; i++) {
    expectedUrls.add(new URL(String.format(URL_FORMATTER, TEST_HOST, TEST_PORT, TEST_IDS.get(i), "status")));
    futures.add(client.getStatusAsync(TEST_IDS.get(i)));
  }
  List<KafkaIndexTask.Status> responses = Futures.allAsList(futures).get();
  verifyAll();
  List<Request> requests = captured.getValues();
  Assert.assertEquals(numRequests, requests.size());
  Assert.assertEquals(numRequests, responses.size());
  for (int i = 0; i < numRequests; i++) {
    Assert.assertEquals(HttpMethod.GET, requests.get(i).getMethod());
    Assert.assertTrue("unexpectedURL", expectedUrls.contains(requests.get(i).getUrl()));
    Assert.assertEquals(KafkaIndexTask.Status.READING, responses.get(i));
  }
}
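This test stubs the HTTP client with Futures.immediateFuture and collects the responses with Futures.allAsList. Unlike the successfulAsList calls in the supervisor methods above, allAsList fails the combined future as soon as any input fails, which suits this test because every stubbed response is expected to succeed. A minimal standalone sketch (class name and values are illustrative):

import java.util.List;
import com.google.common.collect.ImmutableList;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;

public class AllAsListSketch {
  public static void main(String[] args) throws Exception {
    List<ListenableFuture<String>> futures = ImmutableList.of(
        Futures.immediateFuture("READING"),
        Futures.immediateFuture("READING")
    );

    // allAsList succeeds only if every input succeeds; a single failure or
    // cancellation fails the combined future, unlike successfulAsList.
    List<String> statuses = Futures.allAsList(futures).get();
    System.out.println(statuses); // [READING, READING]
  }
}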