Example 81 with ExecutionException

Use of java.util.concurrent.ExecutionException in project druid by druid-io.

From class LookupCoordinatorManager, method updateNodes.

void updateNodes(Collection<URL> urls, final Map<String, Map<String, Object>> knownLookups) throws IOException, InterruptedException, ExecutionException {
    if (knownLookups == null) {
        LOG.debug("No config for lookups found");
        return;
    }
    if (knownLookups.isEmpty()) {
        LOG.debug("No known lookups. Skipping update");
        return;
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Updating %d lookups on %d nodes", knownLookups.size(), urls.size());
    }
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                try {
                    updateAllOnHost(url, knownLookups);
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    LOG.warn("Update on [%s] interrupted", url);
                    throw Throwables.propagate(e);
                } catch (IOException | ExecutionException e) {
                    // Don't raise as ExecutionException. Just log and continue
                    LOG.makeAlert(e, "Error submitting to [%s]", url).emit();
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        LOG.warn("Timeout in updating hosts! Attempting to cancel");
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
    }
}
Also used: ArrayList(java.util.ArrayList), ListenableFuture(com.google.common.util.concurrent.ListenableFuture), IOException(java.io.IOException), ExecutionException(java.util.concurrent.ExecutionException), URL(java.net.URL), TimeoutException(java.util.concurrent.TimeoutException)
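
The pattern above generalizes beyond lookups: fan the per-node work out to a listening executor, gather the resulting ListenableFutures with Futures.allAsList, bound the wait for the whole batch with a timed get(), and cancel with interruption on timeout. A minimal self-contained sketch of that idiom using Guava; the class and task names here are illustrative, not from the Druid source:

import com.google.common.util.concurrent.Futures;
import com.google.common.util.concurrent.ListenableFuture;
import com.google.common.util.concurrent.ListeningExecutorService;
import com.google.common.util.concurrent.MoreExecutors;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class BoundedFanOut {

    public static void main(String[] args) throws InterruptedException, ExecutionException {
        ListeningExecutorService exec =
            MoreExecutors.listeningDecorator(Executors.newFixedThreadPool(4));
        List<ListenableFuture<?>> futures = new ArrayList<>();
        for (int i = 0; i < 8; i++) {
            final int node = i;
            // Like updateAllOnHost above, each task handles its own failures so
            // one bad node does not fail the whole batch.
            futures.add(exec.submit(() -> System.out.println("updating node " + node)));
        }
        // allAsList completes once every child completes; the timed get() bounds
        // the total wait for the batch rather than per task.
        ListenableFuture<List<Object>> all = Futures.allAsList(futures);
        try {
            all.get(5, TimeUnit.SECONDS);
        } catch (TimeoutException e) {
            // Cancelling the aggregate future interrupts the still-running children.
            all.cancel(true);
        }
        exec.shutdown();
    }
}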

Example 82 with ExecutionException

Use of java.util.concurrent.ExecutionException in project druid by druid-io.

From class LookupCoordinatorManager, method deleteAllOnTier.

void deleteAllOnTier(final String tier, final Collection<String> dropLookups) throws ExecutionException, InterruptedException, IOException {
    if (dropLookups.isEmpty()) {
        LOG.debug("Nothing to drop");
        return;
    }
    final Collection<URL> urls = getAllHostsAnnounceEndpoint(tier);
    final List<ListenableFuture<?>> futures = new ArrayList<>(urls.size());
    for (final URL url : urls) {
        futures.add(executorService.submit(new Runnable() {

            @Override
            public void run() {
                for (final String drop : dropLookups) {
                    final URL lookupURL;
                    try {
                        lookupURL = new URL(url.getProtocol(), url.getHost(), url.getPort(), String.format("%s/%s", url.getFile(), drop));
                    } catch (MalformedURLException e) {
                        throw new ISE(e, "Error creating url for [%s]/[%s]", url, drop);
                    }
                    try {
                        deleteOnHost(lookupURL);
                    } catch (InterruptedException e) {
                        Thread.currentThread().interrupt();
                        LOG.warn("Delete [%s] interrupted", lookupURL);
                        throw Throwables.propagate(e);
                    } catch (IOException | ExecutionException e) {
                        // Don't raise as ExecutionException. Just log and continue
                        LOG.makeAlert(e, "Error deleting [%s]", lookupURL).emit();
                    }
                }
            }
        }));
    }
    final ListenableFuture allFuture = Futures.allAsList(futures);
    try {
        allFuture.get(lookupCoordinatorManagerConfig.getUpdateAllTimeout().getMillis(), TimeUnit.MILLISECONDS);
    } catch (TimeoutException e) {
        // This should cause Interrupted exceptions on the offending ones
        allFuture.cancel(true);
        throw new ExecutionException("Timeout in updating hosts! Attempting to cancel", e);
    }
}
Also used: MalformedURLException(java.net.MalformedURLException), ArrayList(java.util.ArrayList), IOException(java.io.IOException), URL(java.net.URL), ListenableFuture(com.google.common.util.concurrent.ListenableFuture), ISE(io.druid.java.util.common.ISE), ExecutionException(java.util.concurrent.ExecutionException), TimeoutException(java.util.concurrent.TimeoutException)
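
The only behavioral difference from Example 81's tail is the timeout policy: rather than logging and returning, this method cancels the batch and escalates, wrapping the TimeoutException in an ExecutionException via its (String, Throwable) constructor so callers see the batch as failed. A minimal sketch of that policy in isolation; awaitOrFail is a hypothetical helper name:

import com.google.common.util.concurrent.ListenableFuture;

import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class TimeoutPolicy {

    // Hypothetical helper: wait for the aggregate future; on timeout, cancel
    // the still-running per-URL tasks and report the batch as failed.
    static void awaitOrFail(ListenableFuture<List<Object>> all, long timeoutMillis)
            throws InterruptedException, ExecutionException {
        try {
            all.get(timeoutMillis, TimeUnit.MILLISECONDS);
        } catch (TimeoutException e) {
            all.cancel(true); // interrupts the children, as in the methods above
            // ExecutionException(String, Throwable) keeps the timeout as the cause
            throw new ExecutionException("Timeout in updating hosts! Attempting to cancel", e);
        }
    }
}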

Example 83 with ExecutionException

Use of java.util.concurrent.ExecutionException in project che by eclipse.

From class WSocketEventBusClient, method connect.

private void connect(final URI wsUri, final Collection<String> channels) throws IOException, DeploymentException {
    Future<WSClient> clientFuture = connections.get(wsUri);
    if (clientFuture == null) {
        FutureTask<WSClient> newFuture = new FutureTask<>(() -> {
            WSClient wsClient = new WSClient(wsUri, new WSocketListener(wsUri, channels));
            wsClient.connect((int) WS_CONNECTION_TIMEOUT);
            return wsClient;
        });
        clientFuture = connections.putIfAbsent(wsUri, newFuture);
        if (clientFuture == null) {
            clientFuture = newFuture;
            newFuture.run();
        }
    }
    boolean connected = false;
    try {
        // wait for connection
        clientFuture.get();
        connected = true;
    } catch (ExecutionException e) {
        final Throwable cause = e.getCause();
        if (cause instanceof Error) {
            throw (Error) cause;
        } else if (cause instanceof RuntimeException) {
            throw (RuntimeException) cause;
        } else if (cause instanceof IOException) {
            throw (IOException) cause;
        } else if (cause instanceof DeploymentException) {
            throw (DeploymentException) cause;
        }
        throw new RuntimeException(e);
    } catch (InterruptedException e) {
        LOG.info("Client interrupted " + e.getLocalizedMessage());
        Thread.currentThread().interrupt();
        throw new RuntimeException(e);
    } finally {
        if (!connected) {
            connections.remove(wsUri);
        }
    }
}
Also used: WSClient(org.everrest.websockets.client.WSClient), IOException(java.io.IOException), FutureTask(java.util.concurrent.FutureTask), DeploymentException(javax.websocket.DeploymentException), ExecutionException(java.util.concurrent.ExecutionException)
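
The connections map above is the classic per-key compute-once idiom: a FutureTask is installed with putIfAbsent, the first caller to install it runs it, concurrent callers for the same URI block on the winner's future instead of opening a second connection, and (as in the finally block) a failed entry is removed so the key can be retried. A minimal generic sketch of the idiom; the class and method names are illustrative:

import java.util.concurrent.Callable;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.FutureTask;

public class OncePerKey<K, V> {

    private final ConcurrentMap<K, Future<V>> cache = new ConcurrentHashMap<>();

    public V get(K key, Callable<V> loader) throws InterruptedException, ExecutionException {
        Future<V> f = cache.get(key);
        if (f == null) {
            FutureTask<V> task = new FutureTask<>(loader);
            f = cache.putIfAbsent(key, task); // null means we won the race
            if (f == null) {
                f = task;
                task.run(); // losers block on this future's get()
            }
        }
        try {
            return f.get();
        } catch (ExecutionException e) {
            cache.remove(key, f); // drop the failed entry so the key can be retried
            throw e;
        }
    }
}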

Example 84 with ExecutionException

Use of java.util.concurrent.ExecutionException in project druid by druid-io.

From class KafkaSupervisor, method discoverTasks.

private void discoverTasks() throws ExecutionException, InterruptedException, TimeoutException {
    int taskCount = 0;
    List<String> futureTaskIds = Lists.newArrayList();
    List<ListenableFuture<Boolean>> futures = Lists.newArrayList();
    List<Task> tasks = taskStorage.getActiveTasks();
    for (Task task : tasks) {
        if (!(task instanceof KafkaIndexTask) || !dataSource.equals(task.getDataSource())) {
            continue;
        }
        taskCount++;
        final KafkaIndexTask kafkaTask = (KafkaIndexTask) task;
        final String taskId = task.getId();
        // Determine which task group this task belongs to based on one of the partitions handled by this task. If we
        // later determine that this task is actively reading, we will make sure that it matches our current partition
        // allocation (getTaskGroupIdForPartition(partition) should return the same value for every partition being read
        // by this task) and kill it if it is not compatible. If the task is instead found to be in the publishing
        // state, we will permit it to complete even if it doesn't match our current partition allocation to support
        // seamless schema migration.
        Iterator<Integer> it = kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet().iterator();
        final Integer taskGroupId = (it.hasNext() ? getTaskGroupIdForPartition(it.next()) : null);
        if (taskGroupId != null) {
            // check to see if we already know about this task, either in [taskGroups] or in [pendingCompletionTaskGroups]
            // and if not add it to taskGroups or pendingCompletionTaskGroups (if status = PUBLISHING)
            TaskGroup taskGroup = taskGroups.get(taskGroupId);
            if (!isTaskInPendingCompletionGroups(taskId) && (taskGroup == null || !taskGroup.tasks.containsKey(taskId))) {
                futureTaskIds.add(taskId);
                futures.add(Futures.transform(taskClient.getStatusAsync(taskId), new Function<KafkaIndexTask.Status, Boolean>() {

                    @Override
                    public Boolean apply(KafkaIndexTask.Status status) {
                        if (status == KafkaIndexTask.Status.PUBLISHING) {
                            addDiscoveredTaskToPendingCompletionTaskGroups(taskGroupId, taskId, kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap());
                            // update partitionGroups with the publishing task's offsets (if they are greater than what is
                            // existing) so that the next tasks will start reading from where this task left off
                            Map<Integer, Long> publishingTaskCurrentOffsets = taskClient.getCurrentOffsets(taskId, true);
                            for (Map.Entry<Integer, Long> entry : publishingTaskCurrentOffsets.entrySet()) {
                                Integer partition = entry.getKey();
                                Long offset = entry.getValue();
                                ConcurrentHashMap<Integer, Long> partitionOffsets = partitionGroups.get(getTaskGroupIdForPartition(partition));
                                boolean succeeded;
                                do {
                                    succeeded = true;
                                    Long previousOffset = partitionOffsets.putIfAbsent(partition, offset);
                                    if (previousOffset != null && previousOffset < offset) {
                                        succeeded = partitionOffsets.replace(partition, previousOffset, offset);
                                    }
                                } while (!succeeded);
                            }
                        } else {
                            for (Integer partition : kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap().keySet()) {
                                if (!taskGroupId.equals(getTaskGroupIdForPartition(partition))) {
                                    log.warn("Stopping task [%s] which does not match the expected partition allocation", taskId);
                                    try {
                                        stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                                    } catch (InterruptedException | ExecutionException | TimeoutException e) {
                                        log.warn(e, "Exception while stopping task");
                                    }
                                    return false;
                                }
                            }
                            if (taskGroups.putIfAbsent(taskGroupId, new TaskGroup(ImmutableMap.copyOf(kafkaTask.getIOConfig().getStartPartitions().getPartitionOffsetMap()), kafkaTask.getIOConfig().getMinimumMessageTime())) == null) {
                                log.debug("Created new task group [%d]", taskGroupId);
                            }
                            if (!isTaskCurrent(taskGroupId, taskId)) {
                                log.info("Stopping task [%s] which does not match the expected parameters and ingestion spec", taskId);
                                try {
                                    stopTask(taskId, false).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
                                } catch (InterruptedException | ExecutionException | TimeoutException e) {
                                    log.warn(e, "Exception while stopping task");
                                }
                                return false;
                            } else {
                                taskGroups.get(taskGroupId).tasks.putIfAbsent(taskId, new TaskData());
                            }
                        }
                        return true;
                    }
                }, workerExec));
            }
        }
    }
    List<Boolean> results = Futures.successfulAsList(futures).get(futureTimeoutInSeconds, TimeUnit.SECONDS);
    for (int i = 0; i < results.size(); i++) {
        if (results.get(i) == null) {
            String taskId = futureTaskIds.get(i);
            log.warn("Task [%s] failed to return status, killing task", taskId);
            killTask(taskId);
        }
    }
    log.debug("Found [%d] Kafka indexing tasks for dataSource [%s]", taskCount, dataSource);
}
Also used: Task(io.druid.indexing.common.task.Task), KafkaIndexTask(io.druid.indexing.kafka.KafkaIndexTask), Function(com.google.common.base.Function), ExecutionException(java.util.concurrent.ExecutionException), TimeoutException(java.util.concurrent.TimeoutException), TaskStatus(io.druid.indexing.common.TaskStatus), ListenableFuture(com.google.common.util.concurrent.ListenableFuture), Map(java.util.Map), ImmutableMap(com.google.common.collect.ImmutableMap), ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap), HashMap(java.util.HashMap)
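
The do/while in the PUBLISHING branch is a lock-free keep-the-maximum loop over a ConcurrentHashMap: putIfAbsent installs the first offset, and the three-argument replace(key, expected, updated) succeeds only if no concurrent writer changed the entry in the meantime, so the loop retries until the stored offset is at least the observed one. A minimal sketch of just that loop, with illustrative names:

import java.util.concurrent.ConcurrentHashMap;

public class MaxOffsets {

    private final ConcurrentHashMap<Integer, Long> offsets = new ConcurrentHashMap<>();

    public void record(int partition, long offset) {
        boolean succeeded;
        do {
            succeeded = true;
            Long previous = offsets.putIfAbsent(partition, offset);
            if (previous != null && previous < offset) {
                // replace() fails if another writer updated the entry since we
                // read `previous`; loop and re-read in that case.
                succeeded = offsets.replace(partition, previous, offset);
            }
        } while (!succeeded);
    }
}

On Java 8 and later, offsets.merge(partition, offset, Math::max) expresses the same atomic maximum update in a single call.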

Example 85 with ExecutionException

Use of java.util.concurrent.ExecutionException in project druid by druid-io.

From class ChainedExecutionQueryRunner, method run.

@Override
public Sequence<T> run(final Query<T> query, final Map<String, Object> responseContext) {
    final int priority = BaseQuery.getContextPriority(query, 0);
    final Ordering ordering = query.getResultOrdering();
    return new BaseSequence<T, Iterator<T>>(new BaseSequence.IteratorMaker<T, Iterator<T>>() {

        @Override
        public Iterator<T> make() {
            // Make it a List<> to materialize all of the values (so that it will submit everything to the executor)
            ListenableFuture<List<Iterable<T>>> futures = Futures.allAsList(Lists.newArrayList(Iterables.transform(queryables, new Function<QueryRunner<T>, ListenableFuture<Iterable<T>>>() {

                @Override
                public ListenableFuture<Iterable<T>> apply(final QueryRunner<T> input) {
                    if (input == null) {
                        throw new ISE("Null queryRunner! Looks to be some segment unmapping action happening");
                    }
                    return exec.submit(new AbstractPrioritizedCallable<Iterable<T>>(priority) {

                        @Override
                        public Iterable<T> call() throws Exception {
                            try {
                                Sequence<T> result = input.run(query, responseContext);
                                if (result == null) {
                                    throw new ISE("Got a null result! Segments are missing!");
                                }
                                List<T> retVal = Sequences.toList(result, Lists.<T>newArrayList());
                                if (retVal == null) {
                                    throw new ISE("Got a null list of results! WTF?!");
                                }
                                return retVal;
                            } catch (QueryInterruptedException e) {
                                throw Throwables.propagate(e);
                            } catch (Exception e) {
                                log.error(e, "Exception with one of the sequences!");
                                throw Throwables.propagate(e);
                            }
                        }
                    });
                }
            })));
            queryWatcher.registerQuery(query, futures);
            try {
                final Number timeout = query.getContextValue(QueryContextKeys.TIMEOUT, (Number) null);
                return new MergeIterable<>(ordering.nullsFirst(), timeout == null ? futures.get() : futures.get(timeout.longValue(), TimeUnit.MILLISECONDS)).iterator();
            } catch (InterruptedException e) {
                log.warn(e, "Query interrupted, cancelling pending results, query id [%s]", query.getId());
                futures.cancel(true);
                throw new QueryInterruptedException(e);
            } catch (CancellationException e) {
                throw new QueryInterruptedException(e);
            } catch (TimeoutException e) {
                log.info("Query timeout, cancelling pending results for query id [%s]", query.getId());
                futures.cancel(true);
                throw new QueryInterruptedException(e);
            } catch (ExecutionException e) {
                throw Throwables.propagate(e.getCause());
            }
        }

        @Override
        public void cleanup(Iterator<T> tIterator) {
        }
    });
}
Also used: MergeIterable(io.druid.java.util.common.guava.MergeIterable), BaseSequence(io.druid.java.util.common.guava.BaseSequence), Sequence(io.druid.java.util.common.guava.Sequence), CancellationException(java.util.concurrent.CancellationException), TimeoutException(java.util.concurrent.TimeoutException), ExecutionException(java.util.concurrent.ExecutionException), Function(com.google.common.base.Function), Ordering(com.google.common.collect.Ordering), Iterator(java.util.Iterator), ListenableFuture(com.google.common.util.concurrent.ListenableFuture), ISE(io.druid.java.util.common.ISE), List(java.util.List)
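
The catch chain above is the standard four-way handling around a timed Future.get(): interruption and timeout both cancel the underlying work, cancellation passes through, and ExecutionException is unwrapped so callers see the task's actual failure rather than the wrapper. A minimal sketch of that shape detached from the Druid query types; the names are illustrative:

import java.util.concurrent.CancellationException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public final class TimedGet {

    // A null timeout means wait without a deadline, mirroring the optional
    // QueryContextKeys.TIMEOUT handling above.
    static <T> T await(Future<T> future, Long timeoutMillis) throws Exception {
        try {
            return timeoutMillis == null
                ? future.get()
                : future.get(timeoutMillis, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
            future.cancel(true); // stop the work we were waiting on
            Thread.currentThread().interrupt();
            throw e;
        } catch (TimeoutException e) {
            future.cancel(true); // reclaim resources once past the deadline
            throw e;
        } catch (CancellationException e) {
            throw e; // someone else cancelled the work; propagate as-is
        } catch (ExecutionException e) {
            // The task itself failed; rethrow its cause rather than the wrapper.
            Throwable cause = e.getCause();
            throw cause instanceof Exception ? (Exception) cause : e;
        }
    }
}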

Aggregations

Types most often used together with ExecutionException across the indexed sources, with usage counts:

ExecutionException (java.util.concurrent.ExecutionException): 1341
IOException (java.io.IOException): 367
Test (org.junit.Test): 335
TimeoutException (java.util.concurrent.TimeoutException): 258
ArrayList (java.util.ArrayList): 237
Future (java.util.concurrent.Future): 218
ExecutorService (java.util.concurrent.ExecutorService): 152
CountDownLatch (java.util.concurrent.CountDownLatch): 103
List (java.util.List): 98
CancellationException (java.util.concurrent.CancellationException): 98
Callable (java.util.concurrent.Callable): 97
Test (org.testng.annotations.Test): 78
HashMap (java.util.HashMap): 69
Map (java.util.Map): 65
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 64
RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 63
AtomicInteger (java.util.concurrent.atomic.AtomicInteger): 56
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 47
QuickTest (com.hazelcast.test.annotation.QuickTest): 47
UncheckedExecutionException (com.google.common.util.concurrent.UncheckedExecutionException): 46