
Example 6 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From class NamespaceExtractionCacheManagersTest, method testRacyCreation.

@Test(timeout = 30000L)
public void testRacyCreation() throws Exception {
    final int concurrentThreads = 10;
    final ListeningExecutorService service = MoreExecutors.listeningDecorator(Execs.multiThreaded(concurrentThreads, "offheaptest-%s"));
    final List<ListenableFuture<?>> futures = new ArrayList<>();
    final CountDownLatch thunder = new CountDownLatch(1);
    try {
        for (int i = 0; i < concurrentThreads; ++i) {
            futures.add(service.submit(new Runnable() {

                @Override
                public void run() {
                    try {
                        thunder.await();
                    } catch (InterruptedException e) {
                        throw Throwables.propagate(e);
                    }
                    for (int i = 0; i < 1000; ++i) {
                        CacheHandler cacheHandler = manager.createCache();
                        cacheHandler.close();
                    }
                }
            }));
        }
        thunder.countDown();
        Futures.allAsList(futures).get();
    } finally {
        service.shutdown();
        service.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
    }
    Assert.assertEquals(0, manager.cacheCount());
}
Also used: ArrayList (java.util.ArrayList) ListenableFuture (com.google.common.util.concurrent.ListenableFuture) ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService) CountDownLatch (java.util.concurrent.CountDownLatch) Test (org.junit.Test)
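
The test above decorates a plain executor with MoreExecutors.listeningDecorator, gates all workers on a single CountDownLatch so they start at the same instant, and joins them with Futures.allAsList. Below is a minimal sketch of the same gating pattern using only Guava and the JDK; it omits the Druid-specific classes (Execs, CacheHandler) and leaves the contended work as a placeholder comment.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class RacyCreationSketch {
    public static void main(String[] args) throws Exception {
        final int concurrentThreads = 10;
        final ListeningExecutorService service =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(concurrentThreads));
        final CountDownLatch gate = new CountDownLatch(1);
        final List<ListenableFuture<?>> futures = new ArrayList<>();
        try {
            for (int i = 0; i < concurrentThreads; i++) {
                futures.add(service.submit(new Runnable() {
                    @Override
                    public void run() {
                        try {
                            gate.await(); // every worker blocks here until the gate opens
                        } catch (InterruptedException e) {
                            Thread.currentThread().interrupt();
                            throw new RuntimeException(e);
                        }
                        // contended work goes here, e.g. repeatedly create and close a shared resource
                    }
                }));
            }
            gate.countDown();                 // open the gate: all workers race at once
            Futures.allAsList(futures).get(); // rethrows if any worker failed
        } finally {
            service.shutdown();
            service.awaitTermination(Long.MAX_VALUE, TimeUnit.MILLISECONDS);
        }
    }
}

Releasing every worker from one latch maximizes the chance of overlapping calls, which is the point of a racy-creation test like the one above.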

Example 7 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From class RealtimeIndexTaskTest, method testRestore.

@Test(timeout = 60_000L)
public void testRestore() throws Exception {
    final File directory = tempFolder.newFolder();
    final RealtimeIndexTask task1 = makeRealtimeTask(null);
    final DataSegment publishedSegment;
    // First run:
    {
        final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
        final TaskToolbox taskToolbox = makeToolbox(task1, mdc, directory);
        final ListenableFuture<TaskStatus> statusFuture = runTask(task1, taskToolbox);
        // Wait for firehose to show up, it starts off null.
        while (task1.getFirehose() == null) {
            Thread.sleep(50);
        }
        final TestFirehose firehose = (TestFirehose) task1.getFirehose();
        firehose.addRows(ImmutableList.<InputRow>of(new MapBasedInputRow(now, ImmutableList.of("dim1"), ImmutableMap.<String, Object>of("dim1", "foo"))));
        // Trigger graceful shutdown.
        task1.stopGracefully();
        // Wait for the task to finish. The status doesn't really matter, but we'll check it anyway.
        final TaskStatus taskStatus = statusFuture.get();
        Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
        // Nothing should be published.
        Assert.assertEquals(Sets.newHashSet(), mdc.getPublished());
    }
    // Second run:
    {
        final TestIndexerMetadataStorageCoordinator mdc = new TestIndexerMetadataStorageCoordinator();
        final RealtimeIndexTask task2 = makeRealtimeTask(task1.getId());
        final TaskToolbox taskToolbox = makeToolbox(task2, mdc, directory);
        final ListenableFuture<TaskStatus> statusFuture = runTask(task2, taskToolbox);
        // Wait for firehose to show up, it starts off null.
        while (task2.getFirehose() == null) {
            Thread.sleep(50);
        }
        // Do a query, at this point the previous data should be loaded.
        Assert.assertEquals(1, sumMetric(task2, "rows"));
        final TestFirehose firehose = (TestFirehose) task2.getFirehose();
        firehose.addRows(ImmutableList.<InputRow>of(new MapBasedInputRow(now, ImmutableList.of("dim2"), ImmutableMap.<String, Object>of("dim2", "bar"))));
        // Stop the firehose, this will drain out existing events.
        firehose.close();
        // Wait for publish.
        while (mdc.getPublished().isEmpty()) {
            Thread.sleep(50);
        }
        publishedSegment = Iterables.getOnlyElement(mdc.getPublished());
        // Do a query.
        Assert.assertEquals(2, sumMetric(task2, "rows"));
        // Simulate handoff.
        for (Map.Entry<SegmentDescriptor, Pair<Executor, Runnable>> entry : handOffCallbacks.entrySet()) {
            final Pair<Executor, Runnable> executorRunnablePair = entry.getValue();
            Assert.assertEquals(new SegmentDescriptor(publishedSegment.getInterval(), publishedSegment.getVersion(), publishedSegment.getShardSpec().getPartitionNum()), entry.getKey());
            executorRunnablePair.lhs.execute(executorRunnablePair.rhs);
        }
        handOffCallbacks.clear();
        // Wait for the task to finish.
        final TaskStatus taskStatus = statusFuture.get();
        Assert.assertEquals(TaskStatus.Status.SUCCESS, taskStatus.getStatusCode());
    }
}
Also used: TaskStatus (io.druid.indexing.common.TaskStatus) DataSegment (io.druid.timeline.DataSegment) TaskToolbox (io.druid.indexing.common.TaskToolbox) Executor (java.util.concurrent.Executor) TestIndexerMetadataStorageCoordinator (io.druid.indexing.test.TestIndexerMetadataStorageCoordinator) SegmentDescriptor (io.druid.query.SegmentDescriptor) MapBasedInputRow (io.druid.data.input.MapBasedInputRow) InputRow (io.druid.data.input.InputRow) ListenableFuture (com.google.common.util.concurrent.ListenableFuture) File (java.io.File) Pair (io.druid.java.util.common.Pair) Test (org.junit.Test)
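
In the restore test the caller blocks on statusFuture.get() to read the final TaskStatus. When blocking is not desirable, the same ListenableFuture can instead be observed asynchronously with Futures.addCallback. The sketch below uses only Guava (assuming a version new enough to provide MoreExecutors.directExecutor, i.e. 18+); the Callable and its string result are hypothetical stand-ins for runTask and TaskStatus, not Druid code.

import com.google.common.util.concurrent.FutureCallback;
import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;

public class StatusCallbackSketch {
    public static void main(String[] args) {
        final ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

        // Stand-in for runTask(task, toolbox): any Callable whose result we want to observe.
        final ListenableFuture<String> statusFuture = exec.submit(new Callable<String>() {
            @Override
            public String call() {
                return "SUCCESS";
            }
        });

        // React to completion without blocking the submitting thread.
        Futures.addCallback(statusFuture, new FutureCallback<String>() {
            @Override
            public void onSuccess(String status) {
                System.out.println("task finished with status: " + status);
            }

            @Override
            public void onFailure(Throwable t) {
                t.printStackTrace();
            }
        }, MoreExecutors.directExecutor());

        exec.shutdown();
    }
}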

Example 8 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From class RemoteTaskRunnerRunPendingTasksConcurrencyTest, method testConcurrency.

// This task reproduces the races described in https://github.com/druid-io/druid/issues/2842
@Test(timeout = 60_000)
public void testConcurrency() throws Exception {
    rtrTestUtils.makeWorker("worker0");
    rtrTestUtils.makeWorker("worker1");
    remoteTaskRunner = rtrTestUtils.makeRemoteTaskRunner(new TestRemoteTaskRunnerConfig(new Period("PT3600S")) {

        public int getPendingTasksRunnerNumThreads() {
            return 2;
        }
    });
    int numTasks = 6;
    ListenableFuture<TaskStatus>[] results = new ListenableFuture[numTasks];
    Task[] tasks = new Task[numTasks];
    //2 tasks
    for (int i = 0; i < 2; i++) {
        tasks[i] = TestTasks.unending("task" + i);
        results[i] = (remoteTaskRunner.run(tasks[i]));
    }
    waitForBothWorkersToHaveUnackedTasks();
    //3 more tasks, all of which get queued up
    for (int i = 2; i < 5; i++) {
        tasks[i] = TestTasks.unending("task" + i);
        results[i] = (remoteTaskRunner.run(tasks[i]));
    }
    //simulate completion of task0 and task1
    mockWorkerRunningAndCompletionSuccessfulTasks(tasks[0], tasks[1]);
    Assert.assertEquals(TaskStatus.Status.SUCCESS, results[0].get().getStatusCode());
    Assert.assertEquals(TaskStatus.Status.SUCCESS, results[1].get().getStatusCode());
    // now both threads race to run the last 3 tasks. task2 and task3 are being assigned
    waitForBothWorkersToHaveUnackedTasks();
    if (remoteTaskRunner.getWorkersWithUnacknowledgedTask().containsValue(tasks[2].getId()) && remoteTaskRunner.getWorkersWithUnacknowledgedTask().containsValue(tasks[3].getId())) {
        remoteTaskRunner.shutdown("task4");
        mockWorkerRunningAndCompletionSuccessfulTasks(tasks[3], tasks[2]);
        Assert.assertEquals(TaskStatus.Status.SUCCESS, results[3].get().getStatusCode());
        Assert.assertEquals(TaskStatus.Status.SUCCESS, results[2].get().getStatusCode());
    } else if (remoteTaskRunner.getWorkersWithUnacknowledgedTask().containsValue(tasks[3].getId()) && remoteTaskRunner.getWorkersWithUnacknowledgedTask().containsValue(tasks[4].getId())) {
        remoteTaskRunner.shutdown("task2");
        mockWorkerRunningAndCompletionSuccessfulTasks(tasks[4], tasks[3]);
        Assert.assertEquals(TaskStatus.Status.SUCCESS, results[4].get().getStatusCode());
        Assert.assertEquals(TaskStatus.Status.SUCCESS, results[3].get().getStatusCode());
    } else if (remoteTaskRunner.getWorkersWithUnacknowledgedTask().containsValue(tasks[4].getId()) && remoteTaskRunner.getWorkersWithUnacknowledgedTask().containsValue(tasks[2].getId())) {
        remoteTaskRunner.shutdown("task3");
        mockWorkerRunningAndCompletionSuccessfulTasks(tasks[4], tasks[2]);
        Assert.assertEquals(TaskStatus.Status.SUCCESS, results[4].get().getStatusCode());
        Assert.assertEquals(TaskStatus.Status.SUCCESS, results[2].get().getStatusCode());
    } else {
        throw new ISE("two out of three tasks 2,3 and 4 must be waiting for ack.");
    }
    //ensure that RTR is doing OK and still making progress
    tasks[5] = TestTasks.unending("task5");
    results[5] = remoteTaskRunner.run(tasks[5]);
    waitForOneWorkerToHaveUnackedTasks();
    if (rtrTestUtils.taskAnnounced("worker0", tasks[5].getId())) {
        rtrTestUtils.mockWorkerRunningTask("worker0", tasks[5]);
        rtrTestUtils.mockWorkerCompleteSuccessfulTask("worker0", tasks[5]);
    } else {
        rtrTestUtils.mockWorkerRunningTask("worker1", tasks[5]);
        rtrTestUtils.mockWorkerCompleteSuccessfulTask("worker1", tasks[5]);
    }
    Assert.assertEquals(TaskStatus.Status.SUCCESS, results[5].get().getStatusCode());
}
Also used : Task(io.druid.indexing.common.task.Task) Period(org.joda.time.Period) ListenableFuture(com.google.common.util.concurrent.ListenableFuture) ISE(io.druid.java.util.common.ISE) Test(org.junit.Test)

Example 9 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From class ChainedExecutionQueryRunner, method run.

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final int priority = BaseQuery.getContextPriority(query, 0);
    final Ordering ordering = query.getResultOrdering();
    return new BaseSequence<T, Iterator<T>>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {

        @Override
        public Iterator<T> make() {
            // Make it a List<> to materialize all of the values (so that it will submit everything to the executor)
            ListenableFuture<List<Iterable<T>>> futures = Futures.allAsList(Lists.newArrayList(Iterables.transform(queryables, new Function<QueryRunner<T>, ListenableFuture<Iterable<T>>>() {

                @Override
                public ListenableFuture<Iterable<T>> apply(final QueryRunner<T> input) {
                    if (input == null) {
                        throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                    }
                    return exec.submit(new AbstractPrioritizedCallable<Iterable<T>>(priority) {

                        @Override
                        public Iterable<T> call() throws Exception {
                            try {
                                Sequence<T> result = input.run(query, responseContext);
                                if (result == null) {
                                    throw new ISE("Got a null result! Segments are missing!");
                                }
                                List<T> retVal = Sequences.toList(result, Lists.<T>newArrayList());
                                if (retVal == null) {
                                    throw new ISE("Got a null list of results! WTF?!");
                                }
                                return retVal;
                            } catch (QueryInterruptedException e) {
                                throw Throwables.propagate(e);
                            } catch (Exception e) {
                                log.error(e, "Exception with one of the sequences!");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
                }
            })));
            queryWatcher.registerQuery(query, futures);
            try {
                final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
                return new MergeIterable<>(ordering.nullsFirst(), timeout == null ? futures.get() : futures.get(timeout.longValue(), TimeUnit.MILLISECONDS)).iterator();
            } catch (InterruptedException e) {
                log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                futures.cancel(true);
                throw new QueryInterruptedException(e);
            } catch (CancellationException e) {
                throw new QueryInterruptedException(e);
            } catch (TimeoutException e) {
                log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                futures.cancel(true);
                throw new QueryInterruptedException(e);
            } catch (ExecutionException e) {
                throw Throwables.propagate(e.getCause());
            }
        }

        @Override
        public void cleanup(Iterator<T> tIterator) {
        }
    });
}
Also used: MergeIterable (io.druid.java.util.common.guava.MergeIterable) BaseSequence (io.druid.java.util.common.guava.BaseSequence) Sequence (io.druid.java.util.common.guava.Sequence) CancellationException (java.util.concurrent.CancellationException) TimeoutException (java.util.concurrent.TimeoutException) ExecutionException (java.util.concurrent.ExecutionException) Function (com.google.common.base.Function) Ordering (com.google.common.collect.Ordering) Iterator (java.util.Iterator) ListenableFuture (com.google.common.util.concurrent.ListenableFuture) ISE (io.druid.java.util.common.ISE) List (java.util.List)
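
The core future handling in ChainedExecutionQueryRunner.run is: fan the sub-queries out with Futures.allAsList, block with or without a timeout, and cancel the combined future on interruption or timeout. A minimal sketch of that timeout-and-cancel pattern using only Guava and the JDK; the sleep-based sub-tasks are placeholders for per-segment query work, not Druid code.

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class TimeoutCancelSketch {
    public static void main(String[] args) throws Exception {
        final ListeningExecutorService exec =
                MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(2));
        try {
            // Two "sub-queries", one of which is too slow for the deadline.
            final ListenableFuture<List<String>> combined = Futures.allAsList(Arrays.asList(
                    exec.submit(slow(10, "fast")),
                    exec.submit(slow(5_000, "slow"))));
            try {
                System.out.println(combined.get(100, TimeUnit.MILLISECONDS));
            } catch (TimeoutException e) {
                combined.cancel(true); // also attempts to cancel the still-running sub-tasks
                System.out.println("timed out, pending work cancelled");
            }
        } finally {
            exec.shutdownNow();
        }
    }

    private static Callable<String> slow(final long millis, final String result) {
        return new Callable<String>() {
            @Override
            public String call() throws Exception {
                Thread.sleep(millis);
                return result;
            }
        };
    }
}

Cancelling the future returned by Futures.allAsList attempts to cancel every component future, which is what lets a runner like the one above stop per-segment work once the query deadline has passed.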

Example 10 with ListenableFuture

Use of com.google.common.util.concurrent.ListenableFuture in project druid by druid-io.

From class PrioritizedExecutorServiceTest, method testOrderedExecutionEqualPriorityMix.

@Test
public void testOrderedExecutionEqualPriorityMix() throws ExecutionException, InterruptedException {
    exec = new PrioritizedExecutorService(exec.threadPoolExecutor, true, 0, config);
    final int numTasks = 1_000;
    final List<ListenableFuture<?>> futures = Lists.newArrayListWithExpectedSize(numTasks);
    final AtomicInteger hasRun = new AtomicInteger(0);
    final Random random = new Random(789401);
    for (int i = 0; i < numTasks; ++i) {
        switch(random.nextInt(4)) {
            case 0:
                futures.add(exec.submit(getCheckingPrioritizedCallable(i, hasRun)));
                break;
            case 1:
                futures.add(exec.submit(getCheckingPrioritizedRunnable(i, hasRun)));
                break;
            case 2:
                futures.add(exec.submit(getCheckingCallable(i, hasRun)));
                break;
            case 3:
                futures.add(exec.submit(getCheckingRunnable(i, hasRun)));
                break;
            default:
                Assert.fail("Bad random result");
        }
    }
    latch.countDown();
    checkFutures(futures);
}
Also used: Random (java.util.Random) AtomicInteger (java.util.concurrent.atomic.AtomicInteger) ListenableFuture (com.google.common.util.concurrent.ListenableFuture) Test (org.junit.Test)

Aggregations

ListenableFuture (com.google.common.util.concurrent.ListenableFuture): 566 usages
ArrayList (java.util.ArrayList): 292 usages
List (java.util.List): 175 usages
Test (org.junit.Test): 158 usages
ListeningExecutorService (com.google.common.util.concurrent.ListeningExecutorService): 111 usages
ExecutionException (java.util.concurrent.ExecutionException): 111 usages
Map (java.util.Map): 108 usages
Futures (com.google.common.util.concurrent.Futures): 93 usages
IOException (java.io.IOException): 77 usages
WriteTransaction (org.opendaylight.controller.md.sal.binding.api.WriteTransaction): 71 usages
HashMap (java.util.HashMap): 64 usages
ImmutableList (com.google.common.collect.ImmutableList): 63 usages
MoreExecutors (com.google.common.util.concurrent.MoreExecutors): 62 usages
ImmutableMap (com.google.common.collect.ImmutableMap): 59 usages
Set (java.util.Set): 59 usages
BigInteger (java.math.BigInteger): 57 usages
File (java.io.File): 56 usages
Logger (org.slf4j.Logger): 56 usages
Nullable (javax.annotation.Nullable): 55 usages
LoggerFactory (org.slf4j.LoggerFactory): 55 usages