Example 26 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project druid by druid-io.

The class KafkaIndexTask, method run:

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;
    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }
    runThread = Thread.currentThread();
    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema, new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler().addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics), ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
        final FiniteAppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics);
        final KafkaConsumer<byte[], byte[]> consumer = newConsumer()) {
        appenderator = appenderator0;
        final String topic = ioConfig.getStartPartitions().getTopic();
        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getStartPartitions().getPartitionOffsetMap());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final KafkaPartitions restoredNextPartitions = toolbox.getObjectMapper().convertValue(restoredMetadataMap.get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
            nextOffsets.putAll(restoredNextPartitions.getPartitionOffsetMap());
            // Sanity checks.
            if (!restoredNextPartitions.getTopic().equals(ioConfig.getStartPartitions().getTopic())) {
                throw new ISE("WTF?! Restored topic[%s] but expected topic[%s]", restoredNextPartitions.getTopic(), ioConfig.getStartPartitions().getTopic());
            }
            if (!nextOffsets.keySet().equals(ioConfig.getStartPartitions().getPartitionOffsetMap().keySet())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets.keySet(), ioConfig.getStartPartitions().getPartitionOffsetMap().keySet());
            }
        }
        // Set up sequenceNames.
        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum, String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }
        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {

            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
                return new Committer() {

                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_PARTITIONS, new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), snapshot));
                    }

                    @Override
                    public void run() {
                    // Do nothing.
                    }
                };
            }
        };
        Set<Integer> assignment = assignPartitionsAndSeekToNext(consumer, topic);
        // Main loop.
        // Could eventually support leader/follower mode (for keeping replicas more in sync)
        boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            while (stillReading) {
                if (possiblyPause(assignment)) {
                    // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
                    // partitions upon resuming. This is safe even if the end offsets have not been modified.
                    assignment = assignPartitionsAndSeekToNext(consumer, topic);
                    if (assignment.isEmpty()) {
                        log.info("All partitions have been fully read");
                        publishOnStop = true;
                        stopRequested = true;
                    }
                }
                if (stopRequested) {
                    break;
                }
                // The retrying business is because the KafkaConsumer throws OffsetOutOfRangeException if the seeked-to
                // offset is not present in the topic-partition. This can happen if we're asking a task to read from data
                // that has not been written yet (which is totally legitimate). So let's wait for it to show up.
                ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
                try {
                    records = consumer.poll(POLL_TIMEOUT);
                } catch (OffsetOutOfRangeException e) {
                    log.warn("OffsetOutOfRangeException with message [%s]", e.getMessage());
                    possiblyResetOffsetsOrWait(e.offsetOutOfRangePartitions(), consumer, toolbox);
                    stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
                }
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    if (log.isTraceEnabled()) {
                        log.trace("Got topic[%s] partition[%d] offset[%,d].", record.topic(), record.partition(), record.offset());
                    }
                    if (record.offset() < endOffsets.get(record.partition())) {
                        if (record.offset() != nextOffsets.get(record.partition())) {
                            throw new ISE("WTF?! Got offset[%,d] after offset[%,d] in partition[%d].", record.offset(), nextOffsets.get(record.partition()), record.partition());
                        }
                        try {
                            final byte[] valueBytes = record.value();
                            if (valueBytes == null) {
                                throw new ParseException("null value");
                            }
                            final InputRow row = Preconditions.checkNotNull(parser.parse(ByteBuffer.wrap(valueBytes)), "row");
                            if (!ioConfig.getMinimumMessageTime().isPresent() || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {
                                final SegmentIdentifier identifier = driver.add(row, sequenceNames.get(record.partition()), committerSupplier);
                                if (identifier == null) {
                                    // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                                    throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
                                }
                                fireDepartmentMetrics.incrementProcessed();
                            } else {
                                fireDepartmentMetrics.incrementThrownAway();
                            }
                        } catch (ParseException e) {
                            if (tuningConfig.isReportParseExceptions()) {
                                throw e;
                            } else {
                                log.debug(e, "Dropping unparseable row from partition[%d] offset[%,d].", record.partition(), record.offset());
                                fireDepartmentMetrics.incrementUnparseable();
                            }
                        }
                        nextOffsets.put(record.partition(), record.offset() + 1);
                    }
                    if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition())) && assignment.remove(record.partition())) {
                        log.info("Finished reading topic[%s], partition[%,d].", record.topic(), record.partition());
                        assignPartitions(consumer, topic, assignment);
                        stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
                    }
                }
            }
        } finally {
            // persist pending data
            driver.persist(committerSupplier.get());
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }
            status = Status.PUBLISHING;
        }
        final TransactionalSegmentPublisher publisher = new TransactionalSegmentPublisher() {

            @Override
            public boolean publishSegments(Set<DataSegment> segments, Object commitMetadata) throws IOException {
                final KafkaPartitions finalPartitions = toolbox.getObjectMapper().convertValue(((Map) commitMetadata).get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
                // Sanity check, we should only be publishing things that match our desired end state.
                if (!endOffsets.equals(finalPartitions.getPartitionOffsetMap())) {
                    throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
                }
                final SegmentTransactionalInsertAction action;
                if (ioConfig.isUseTransaction()) {
                    action = new SegmentTransactionalInsertAction(segments, new KafkaDataSourceMetadata(ioConfig.getStartPartitions()), new KafkaDataSourceMetadata(finalPartitions));
                } else {
                    action = new SegmentTransactionalInsertAction(segments, null, null);
                }
                log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());
                return toolbox.getTaskActionClient().submit(action).isSuccess();
            }
        };
        final SegmentsAndMetadata published = driver.finish(publisher, committerSupplier.get());
        if (published == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ").join(Iterables.transform(published.getSegments(), new Function<DataSegment, String>() {

                @Override
                public String apply(DataSegment input) {
                    return input.getIdentifier();
                }
            })), published.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }
        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }
        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
    }
    return success();
}
Also used : RealtimeIOConfig(io.druid.segment.indexing.RealtimeIOConfig) Set(java.util.Set) SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) SegmentTransactionalInsertAction(io.druid.indexing.common.actions.SegmentTransactionalInsertAction) DataSegment(io.druid.timeline.DataSegment) FireDepartment(io.druid.segment.realtime.FireDepartment) TransactionalSegmentPublisher(io.druid.segment.realtime.appenderator.TransactionalSegmentPublisher) ISE(io.druid.java.util.common.ISE) Supplier(com.google.common.base.Supplier) SegmentsAndMetadata(io.druid.segment.realtime.appenderator.SegmentsAndMetadata) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Appenderator(io.druid.segment.realtime.appenderator.Appenderator) FiniteAppenderatorDriver(io.druid.segment.realtime.appenderator.FiniteAppenderatorDriver) InputRow(io.druid.data.input.InputRow) RealtimeMetricsMonitor(io.druid.segment.realtime.RealtimeMetricsMonitor) Committer(io.druid.data.input.Committer) ParseException(io.druid.java.util.common.parsers.ParseException) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
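
The catch block in this example rethrows a RejectedExecutionException unless it merely wraps the InterruptedException raised while the task was being stopped. A minimal standalone sketch of that unwrap-and-decide pattern (the class and field names below are hypothetical, not from Druid):

import java.util.concurrent.RejectedExecutionException;

// Hypothetical helper: decides whether an exception caught at the end of a
// task's run() is a stop request or a genuine failure.
public class StopAwareHandler {

    private volatile boolean stopRequested;

    public void requestStop() {
        stopRequested = true;
    }

    public void handle(Exception e) throws Exception {
        // Rethrow a RejectedExecutionException unless it only wraps the
        // InterruptedException produced while the task was shutting down.
        if (e instanceof RejectedExecutionException && !(e.getCause() instanceof InterruptedException)) {
            throw e;
        }
        // Interrupted for some reason other than a stop request: restore the
        // interrupt flag and propagate.
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }
        // Otherwise the task was asked to stop; swallow the exception so the
        // caller can report success.
    }
}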

Example 27 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project jetty.project by eclipse.

The class TimerScheduler, method schedule:

@Override
public Task schedule(final Runnable task, final long delay, final TimeUnit units) {
    Timer timer = _timer;
    if (timer == null)
        throw new RejectedExecutionException("STOPPED: " + this);
    SimpleTask t = new SimpleTask(task);
    timer.schedule(t, units.toMillis(delay));
    return t;
}
Also used : Timer(java.util.Timer) RejectedExecutionException(java.util.concurrent.RejectedExecutionException)
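
The contract here is that schedule() rejects new tasks once the backing Timer has been stopped. A self-contained sketch of the same contract against the plain JDK Timer (StoppableTimerScheduler is a hypothetical stand-in for Jetty's TimerScheduler):

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.TimeUnit;

public class StoppableTimerScheduler {

    private volatile Timer timer = new Timer();

    public void stop() {
        Timer t = timer;
        timer = null;
        if (t != null)
            t.cancel(); // stops the timer thread; pending tasks may be dropped
    }

    public void schedule(Runnable task, long delay, TimeUnit unit) {
        Timer t = timer;
        if (t == null)
            throw new RejectedExecutionException("STOPPED: " + this);
        t.schedule(new TimerTask() {
            @Override
            public void run() {
                task.run();
            }
        }, unit.toMillis(delay));
    }

    public static void main(String[] args) throws InterruptedException {
        StoppableTimerScheduler s = new StoppableTimerScheduler();
        s.schedule(() -> System.out.println("ran before stop"), 1, TimeUnit.MILLISECONDS);
        Thread.sleep(50);
        s.stop();
        try {
            s.schedule(() -> System.out.println("never runs"), 1, TimeUnit.MILLISECONDS);
        } catch (RejectedExecutionException expected) {
            System.out.println("rejected: " + expected.getMessage());
        }
    }
}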

Example 28 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project elasticsearch by elastic.

The class MockTcpTransport, method connectToChannels:

@Override
protected NodeChannels connectToChannels(DiscoveryNode node, ConnectionProfile profile) throws IOException {
    final MockChannel[] mockChannels = new MockChannel[1];
    // we always use light here
    final NodeChannels nodeChannels = new NodeChannels(node, mockChannels, LIGHT_PROFILE);
    boolean success = false;
    final MockSocket socket = new MockSocket();
    try {
        Consumer<MockChannel> onClose = (channel) -> {
            final NodeChannels connected = connectedNodes.get(node);
            if (connected != null && connected.hasChannel(channel)) {
                try {
                    executor.execute(() -> {
                        disconnectFromNode(node, channel, "channel closed event");
                    });
                } catch (RejectedExecutionException ex) {
                    logger.debug("failed to run disconnectFromNode - node is shutting down");
                }
            }
        };
        final InetSocketAddress address = node.getAddress().address();
        // we just use a single connection
        configureSocket(socket);
        final TimeValue connectTimeout = profile.getConnectTimeout();
        try {
            socket.connect(address, Math.toIntExact(connectTimeout.millis()));
        } catch (SocketTimeoutException ex) {
            throw new ConnectTransportException(node, "connect_timeout[" + connectTimeout + "]", ex);
        }
        MockChannel channel = new MockChannel(socket, address, "none", onClose);
        channel.loopRead(executor);
        mockChannels[0] = channel;
        success = true;
    } finally {
        if (success == false) {
            IOUtils.close(nodeChannels, socket);
        }
    }
    return nodeChannels;
}
Also used : CancellableThreads(org.elasticsearch.common.util.CancellableThreads) Socket(java.net.Socket) BufferedInputStream(java.io.BufferedInputStream) BigArrays(org.elasticsearch.common.util.BigArrays) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) BytesStreamOutput(org.elasticsearch.common.io.stream.BytesStreamOutput) CircuitBreakerService(org.elasticsearch.indices.breaker.CircuitBreakerService) BufferedOutputStream(java.io.BufferedOutputStream) ServerSocket(java.net.ServerSocket) HashSet(java.util.HashSet) InputStreamStreamInput(org.elasticsearch.common.io.stream.InputStreamStreamInput) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) NetworkService(org.elasticsearch.common.network.NetworkService) SocketException(java.net.SocketException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Settings(org.elasticsearch.common.settings.Settings) NamedWriteableRegistry(org.elasticsearch.common.io.stream.NamedWriteableRegistry) TimeValue(org.elasticsearch.common.unit.TimeValue) SocketTimeoutException(java.net.SocketTimeoutException) Map(java.util.Map) ThreadPool(org.elasticsearch.threadpool.ThreadPool) ExecutorService(java.util.concurrent.ExecutorService) ByteSizeValue(org.elasticsearch.common.unit.ByteSizeValue) OutputStream(java.io.OutputStream) EsExecutors(org.elasticsearch.common.util.concurrent.EsExecutors) MockSocket(org.elasticsearch.mocksocket.MockSocket) Executor(java.util.concurrent.Executor) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) IOUtils(org.apache.lucene.util.IOUtils) Set(java.util.Set) IOException(java.io.IOException) BytesReference(org.elasticsearch.common.bytes.BytesReference) InetSocketAddress(java.net.InetSocketAddress) Executors(java.util.concurrent.Executors) MockServerSocket(org.elasticsearch.mocksocket.MockServerSocket) TimeUnit(java.util.concurrent.TimeUnit) Consumer(java.util.function.Consumer) CountDownLatch(java.util.concurrent.CountDownLatch) AbstractRunnable(org.elasticsearch.common.util.concurrent.AbstractRunnable) List(java.util.List) Version(org.elasticsearch.Version) StreamInput(org.elasticsearch.common.io.stream.StreamInput) Closeable(java.io.Closeable) Collections(java.util.Collections)
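
The onClose callback guards executor.execute() because an ExecutorService that is shutting down rejects new tasks with RejectedExecutionException (the default AbortPolicy). A standalone sketch of just that guard, using only JDK classes:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;

public class ShutdownGuardDemo {

    public static void main(String[] args) {
        ExecutorService executor = Executors.newSingleThreadExecutor();
        executor.shutdown(); // from this point on, new tasks are rejected
        try {
            executor.execute(() -> System.out.println("never runs"));
        } catch (RejectedExecutionException ex) {
            // mirrors the debug log in connectToChannels above
            System.out.println("failed to run disconnectFromNode - node is shutting down");
        }
    }
}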

Example 29 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project elasticsearch by elastic.

The class RemoteClusterConnectionTests, method testCloseWhileConcurrentlyConnecting:

public void testCloseWhileConcurrentlyConnecting() throws IOException, InterruptedException, BrokenBarrierException {
    List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
    try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);
        MockTransportService seedTransport1 = startTransport("seed_node_1", knownNodes, Version.CURRENT);
        MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
        DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
        DiscoveryNode seedNode1 = seedTransport1.getLocalDiscoNode();
        knownNodes.add(seedTransport.getLocalDiscoNode());
        knownNodes.add(discoverableTransport.getLocalDiscoNode());
        knownNodes.add(seedTransport1.getLocalDiscoNode());
        Collections.shuffle(knownNodes, random());
        List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
        Collections.shuffle(seedNodes, random());
        try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
            service.start();
            service.acceptIncomingRequests();
            try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", seedNodes, service, Integer.MAX_VALUE, n -> true)) {
                int numThreads = randomIntBetween(4, 10);
                Thread[] threads = new Thread[numThreads];
                CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
                for (int i = 0; i < threads.length; i++) {
                    final int numConnectionAttempts = randomIntBetween(10, 100);
                    threads[i] = new Thread() {

                        @Override
                        public void run() {
                            try {
                                barrier.await();
                                CountDownLatch latch = new CountDownLatch(numConnectionAttempts);
                                for (int i = 0; i < numConnectionAttempts; i++) {
                                    AtomicReference<RuntimeException> executed = new AtomicReference<>();
                                    ActionListener<Void> listener = ActionListener.wrap(x -> {
                                        if (executed.compareAndSet(null, new RuntimeException())) {
                                            latch.countDown();
                                        } else {
                                            throw new AssertionError("shit's been called twice", executed.get());
                                        }
                                    }, x -> {
                                        if (executed.compareAndSet(null, new RuntimeException())) {
                                            latch.countDown();
                                        } else {
                                            throw new AssertionError("shit's been called twice", executed.get());
                                        }
                                        if (x instanceof RejectedExecutionException || x instanceof AlreadyClosedException || x instanceof CancellableThreads.ExecutionCancelledException) {
                                        } else {
                                            throw new AssertionError(x);
                                        }
                                    });
                                    connection.updateSeedNodes(seedNodes, listener);
                                }
                                latch.await();
                            } catch (Exception ex) {
                                throw new AssertionError(ex);
                            }
                        }
                    };
                    threads[i].start();
                }
                barrier.await();
                connection.close();
            }
        }
    }
}
Also used : CancellableThreads(org.elasticsearch.common.util.CancellableThreads) Socket(java.net.Socket) Arrays(java.util.Arrays) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) AlreadyConnectedException(java.nio.channels.AlreadyConnectedException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicReference(java.util.concurrent.atomic.AtomicReference) ClusterSearchShardsRequest(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest) InetAddress(java.net.InetAddress) ServerSocket(java.net.ServerSocket) ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Settings(org.elasticsearch.common.settings.Settings) ThreadPool(org.elasticsearch.threadpool.ThreadPool) ClusterName(org.elasticsearch.cluster.ClusterName) ESTestCase(org.elasticsearch.test.ESTestCase) MockTransportService(org.elasticsearch.test.transport.MockTransportService) ClusterSearchShardsAction(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction) ClusterSearchShardsGroup(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup) Collections.emptyMap(java.util.Collections.emptyMap) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) TestThreadPool(org.elasticsearch.threadpool.TestThreadPool) CyclicBarrier(java.util.concurrent.CyclicBarrier) Collections.emptySet(java.util.Collections.emptySet) IOException(java.io.IOException) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) InetSocketAddress(java.net.InetSocketAddress) UnknownHostException(java.net.UnknownHostException) MockServerSocket(org.elasticsearch.mocksocket.MockServerSocket) ClusterSearchShardsResponse(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse) ClusterStateResponse(org.elasticsearch.action.admin.cluster.state.ClusterStateResponse) UncheckedIOException(java.io.UncheckedIOException) TimeUnit(java.util.concurrent.TimeUnit) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Version(org.elasticsearch.Version) SuppressForbidden(org.elasticsearch.common.SuppressForbidden) TransportAddress(org.elasticsearch.common.transport.TransportAddress) TransportConnectionListener(org.elasticsearch.transport.TransportConnectionListener) ClusterStateRequest(org.elasticsearch.action.admin.cluster.state.ClusterStateRequest) ClusterStateAction(org.elasticsearch.action.admin.cluster.state.ClusterStateAction) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)

Example 30 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project elasticsearch by elastic.

The class RemoteClusterConnectionTests, method testTriggerUpdatesConcurrently:

public void testTriggerUpdatesConcurrently() throws IOException, InterruptedException {
    List<DiscoveryNode> knownNodes = new CopyOnWriteArrayList<>();
    try (MockTransportService seedTransport = startTransport("seed_node", knownNodes, Version.CURRENT);
        MockTransportService seedTransport1 = startTransport("seed_node_1", knownNodes, Version.CURRENT);
        MockTransportService discoverableTransport = startTransport("discoverable_node", knownNodes, Version.CURRENT)) {
        DiscoveryNode seedNode = seedTransport.getLocalDiscoNode();
        DiscoveryNode discoverableNode = discoverableTransport.getLocalDiscoNode();
        DiscoveryNode seedNode1 = seedTransport1.getLocalDiscoNode();
        knownNodes.add(seedTransport.getLocalDiscoNode());
        knownNodes.add(discoverableTransport.getLocalDiscoNode());
        knownNodes.add(seedTransport1.getLocalDiscoNode());
        Collections.shuffle(knownNodes, random());
        List<DiscoveryNode> seedNodes = Arrays.asList(seedNode1, seedNode);
        Collections.shuffle(seedNodes, random());
        try (MockTransportService service = MockTransportService.createNewService(Settings.EMPTY, Version.CURRENT, threadPool, null)) {
            service.start();
            service.acceptIncomingRequests();
            try (RemoteClusterConnection connection = new RemoteClusterConnection(Settings.EMPTY, "test-cluster", seedNodes, service, Integer.MAX_VALUE, n -> true)) {
                int numThreads = randomIntBetween(4, 10);
                Thread[] threads = new Thread[numThreads];
                CyclicBarrier barrier = new CyclicBarrier(numThreads);
                for (int i = 0; i < threads.length; i++) {
                    final int numConnectionAttempts = randomIntBetween(10, 200);
                    threads[i] = new Thread() {

                        @Override
                        public void run() {
                            try {
                                barrier.await();
                                CountDownLatch latch = new CountDownLatch(numConnectionAttempts);
                                for (int i = 0; i < numConnectionAttempts; i++) {
                                    AtomicBoolean executed = new AtomicBoolean(false);
                                    ActionListener<Void> listener = ActionListener.wrap(x -> {
                                        assertTrue(executed.compareAndSet(false, true));
                                        latch.countDown();
                                    }, x -> {
                                        assertTrue(executed.compareAndSet(false, true));
                                        latch.countDown();
                                        if (x instanceof RejectedExecutionException) {
                                        } else {
                                            throw new AssertionError(x);
                                        }
                                    });
                                    connection.updateSeedNodes(seedNodes, listener);
                                }
                                latch.await();
                            } catch (Exception ex) {
                                throw new AssertionError(ex);
                            }
                        }
                    };
                    threads[i].start();
                }
                for (int i = 0; i < threads.length; i++) {
                    threads[i].join();
                }
                assertTrue(service.nodeConnected(seedNode));
                assertTrue(service.nodeConnected(discoverableNode));
                assertTrue(service.nodeConnected(seedNode1));
                assertTrue(connection.assertNoRunningConnections());
            }
        }
    }
}
Also used : CancellableThreads(org.elasticsearch.common.util.CancellableThreads) Socket(java.net.Socket) Arrays(java.util.Arrays) AlreadyClosedException(org.apache.lucene.store.AlreadyClosedException) AlreadyConnectedException(java.nio.channels.AlreadyConnectedException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) AtomicReference(java.util.concurrent.atomic.AtomicReference) ClusterSearchShardsRequest(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsRequest) InetAddress(java.net.InetAddress) ServerSocket(java.net.ServerSocket) ClusterState(org.elasticsearch.cluster.ClusterState) DiscoveryNode(org.elasticsearch.cluster.node.DiscoveryNode) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Settings(org.elasticsearch.common.settings.Settings) ThreadPool(org.elasticsearch.threadpool.ThreadPool) ClusterName(org.elasticsearch.cluster.ClusterName) ESTestCase(org.elasticsearch.test.ESTestCase) MockTransportService(org.elasticsearch.test.transport.MockTransportService) ClusterSearchShardsAction(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsAction) ClusterSearchShardsGroup(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsGroup) Collections.emptyMap(java.util.Collections.emptyMap) DiscoveryNodes(org.elasticsearch.cluster.node.DiscoveryNodes) TestThreadPool(org.elasticsearch.threadpool.TestThreadPool) CyclicBarrier(java.util.concurrent.CyclicBarrier) Collections.emptySet(java.util.Collections.emptySet) IOException(java.io.IOException) BrokenBarrierException(java.util.concurrent.BrokenBarrierException) InetSocketAddress(java.net.InetSocketAddress) UnknownHostException(java.net.UnknownHostException) MockServerSocket(org.elasticsearch.mocksocket.MockServerSocket) ClusterSearchShardsResponse(org.elasticsearch.action.admin.cluster.shards.ClusterSearchShardsResponse) ClusterStateResponse(org.elasticsearch.action.admin.cluster.state.ClusterStateResponse) UncheckedIOException(java.io.UncheckedIOException) TimeUnit(java.util.concurrent.TimeUnit) CountDownLatch(java.util.concurrent.CountDownLatch) List(java.util.List) Version(org.elasticsearch.Version) SuppressForbidden(org.elasticsearch.common.SuppressForbidden) TransportAddress(org.elasticsearch.common.transport.TransportAddress) TransportConnectionListener(org.elasticsearch.transport.TransportConnectionListener) ClusterStateRequest(org.elasticsearch.action.admin.cluster.state.ClusterStateRequest) ClusterStateAction(org.elasticsearch.action.admin.cluster.state.ClusterStateAction) Collections(java.util.Collections) ActionListener(org.elasticsearch.action.ActionListener) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList)
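
Examples 29 and 30 share one pattern: every connection attempt must complete its listener exactly once, a CountDownLatch accounts for all attempts, and RejectedExecutionException is the failure tolerated while attempts race against close/shutdown. A generic, self-contained sketch of that accounting using a plain ExecutorService (all names below are illustrative, not from Elasticsearch):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;

public class OnceOnlyCallbackDemo {

    public static void main(String[] args) throws InterruptedException {
        int attempts = 100;
        CountDownLatch latch = new CountDownLatch(attempts);
        ExecutorService pool = Executors.newFixedThreadPool(4);
        for (int i = 0; i < attempts; i++) {
            AtomicBoolean executed = new AtomicBoolean(false);
            try {
                pool.execute(() -> {
                    // success path: must fire exactly once per attempt
                    if (!executed.compareAndSet(false, true))
                        throw new AssertionError("callback ran twice");
                    latch.countDown();
                });
            } catch (RejectedExecutionException tolerated) {
                // failure path: rejection is expected once shutdown begins
                if (!executed.compareAndSet(false, true))
                    throw new AssertionError("callback ran twice");
                latch.countDown();
            }
            if (i == attempts / 2)
                pool.shutdown(); // race the remaining submissions against shutdown
        }
        latch.await();
        System.out.println("all attempts completed exactly once");
    }
}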

Aggregations

RejectedExecutionException (java.util.concurrent.RejectedExecutionException)231 ExecutorService (java.util.concurrent.ExecutorService)42 IOException (java.io.IOException)31 Test (org.junit.Test)29 Future (java.util.concurrent.Future)19 ArrayList (java.util.ArrayList)18 ExecutionException (java.util.concurrent.ExecutionException)15 ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor)15 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)14 Executor (java.util.concurrent.Executor)13 ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService)12 List (java.util.List)11 TaskRejectedException (org.springframework.core.task.TaskRejectedException)11 BitmapDrawable (android.graphics.drawable.BitmapDrawable)10 Animation (android.view.animation.Animation)10 Map (java.util.Map)10 CancellationException (java.util.concurrent.CancellationException)10 CacheableBitmapDrawable (uk.co.senab.bitmapcache.CacheableBitmapDrawable)10 ParallelTest (com.hazelcast.test.annotation.ParallelTest)9 QuickTest (com.hazelcast.test.annotation.QuickTest)9