Example 56 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project Hystrix by Netflix.

From the class HystrixCommandTest, method testRejectedThreadUsingQueueSize.

/**
     * Test that we can reject a thread using isQueueSpaceAvailable() instead of just when the pool rejects.
     * <p>
     * For example, we have queue size set to 100 but want to reject when we hit 10.
     * <p>
     * This allows us to use FastProperties to control our rejection point, since we can't resize a queue after it's created.
     */
@Test
public void testRejectedThreadUsingQueueSize() {
    HystrixCommandKey key = HystrixCommandKey.Factory.asKey("Rejection-B");
    TestCircuitBreaker circuitBreaker = new TestCircuitBreaker();
    SingleThreadedPoolWithQueue pool = new SingleThreadedPoolWithQueue(10, 1);
    // Put 1 item in the queue. The thread pool won't pick it up because we're
    // bypassing the pool and adding directly to the queue, so the queue stays full.
    pool.queue.add(new Runnable() {

        @Override
        public void run() {
            System.out.println("**** queue filler1 ****");
            try {
                Thread.sleep(500);
            } catch (InterruptedException e) {
                e.printStackTrace();
            }
        }
    });
    TestCommandRejection command = new TestCommandRejection(key, circuitBreaker, pool, 500, 600, TestCommandRejection.FALLBACK_NOT_IMPLEMENTED);
    try {
        // this should fail as we already have 1 in the queue
        command.queue();
        fail("we shouldn't get here");
    } catch (Exception e) {
        e.printStackTrace();
        assertTrue(command.isResponseRejected());
        assertFalse(command.isResponseShortCircuited());
        assertFalse(command.isResponseTimedOut());
        assertNotNull(command.getExecutionException());
        if (e instanceof HystrixRuntimeException && e.getCause() instanceof RejectedExecutionException) {
            HystrixRuntimeException de = (HystrixRuntimeException) e;
            assertNotNull(de.getFallbackException());
            assertTrue(de.getFallbackException() instanceof UnsupportedOperationException);
            assertNotNull(de.getImplementingClass());
            assertNotNull(de.getCause());
            assertTrue(de.getCause() instanceof RejectedExecutionException);
        } else {
            fail("the exception should be HystrixRuntimeException with cause as RejectedExecutionException");
        }
    }
    assertCommandExecutionEvents(command, HystrixEventType.THREAD_POOL_REJECTED, HystrixEventType.FALLBACK_MISSING);
    assertEquals(0, circuitBreaker.metrics.getCurrentConcurrentExecutionCount());
    assertSaneHystrixRequestLog(1);
}
Also used: TestCircuitBreaker(com.netflix.hystrix.HystrixCircuitBreakerTest.TestCircuitBreaker) HystrixContextRunnable(com.netflix.hystrix.strategy.concurrency.HystrixContextRunnable) HystrixRuntimeException(com.netflix.hystrix.exception.HystrixRuntimeException) TimeoutException(java.util.concurrent.TimeoutException) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) HystrixBadRequestException(com.netflix.hystrix.exception.HystrixBadRequestException) CancellationException(java.util.concurrent.CancellationException) IOException(java.io.IOException) ExecutionException(java.util.concurrent.ExecutionException) Test(org.junit.Test)
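The Javadoc above points at the underlying technique: since a BlockingQueue's capacity is fixed at construction, Hystrix checks queue size against a dynamic threshold (isQueueSpaceAvailable()) before handing work to the pool. Below is a minimal sketch of the same effect, achieved instead by overriding offer() on the work queue; the class and method names are illustrative, not Hystrix internals.

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.atomic.AtomicInteger;

// Illustrative only: Hystrix performs an equivalent size check before
// submitting rather than overriding the queue, but the effect is the same.
class DynamicLimitQueue<E> extends LinkedBlockingQueue<E> {

    private final AtomicInteger rejectionThreshold;

    DynamicLimitQueue(int fixedCapacity, int initialThreshold) {
        super(fixedCapacity);
        this.rejectionThreshold = new AtomicInteger(initialThreshold);
    }

    /** Adjust the rejection point at runtime, e.g. from a dynamic property. */
    void setRejectionThreshold(int threshold) {
        rejectionThreshold.set(threshold);
    }

    @Override
    public boolean offer(E e) {
        // Best-effort check: reject once the dynamic threshold is reached,
        // even though the underlying queue still has physical capacity left.
        return size() < rejectionThreshold.get() && super.offer(e);
    }
}

Because ThreadPoolExecutor's default AbortPolicy throws RejectedExecutionException whenever offer() returns false, lowering the threshold at runtime immediately moves the rejection point, which is exactly what a fast property needs.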

Example 57 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project ribbon by Netflix.

From the class EurekaNotificationServerListUpdaterTest, method testSubmitExceptionClearQueued.

@Test
public void testSubmitExceptionClearQueued() {
    ThreadPoolExecutor executorMock = EasyMock.createMock(ThreadPoolExecutor.class);
    EasyMock.expect(executorMock.submit(EasyMock.isA(Runnable.class))).andThrow(new RejectedExecutionException("test exception"));
    EasyMock.expect(executorMock.isShutdown()).andReturn(Boolean.FALSE);
    EurekaNotificationServerListUpdater serverListUpdater = new EurekaNotificationServerListUpdater(new Provider<EurekaClient>() {

        @Override
        public EurekaClient get() {
            return eurekaClientMock;
        }
    }, executorMock);
    try {
        Capture<EurekaEventListener> eventListenerCapture = new Capture<EurekaEventListener>();
        eurekaClientMock.registerEventListener(EasyMock.capture(eventListenerCapture));
        EasyMock.replay(eurekaClientMock);
        EasyMock.replay(executorMock);
        serverListUpdater.start(new ServerListUpdater.UpdateAction() {

            @Override
            public void doUpdate() {
                Assert.fail("should not reach here");
            }
        });
        eventListenerCapture.getValue().onEvent(new CacheRefreshedEvent());
        Assert.assertFalse(serverListUpdater.updateQueued.get());
    } finally {
        serverListUpdater.stop();
        EasyMock.verify(executorMock);
        EasyMock.verify(eurekaClientMock);
    }
}
Also used: EurekaClient(com.netflix.discovery.EurekaClient) EurekaEventListener(com.netflix.discovery.EurekaEventListener) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Capture(org.easymock.Capture) ServerListUpdater(com.netflix.loadbalancer.ServerListUpdater) CacheRefreshedEvent(com.netflix.discovery.CacheRefreshedEvent) ThreadPoolExecutor(java.util.concurrent.ThreadPoolExecutor) Test(org.junit.Test)
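What the test pins down is a compare-and-set guard around submit(): the updater sets an AtomicBoolean before submitting and must clear it again when the executor rejects the task, or no later event could ever queue an update. A minimal sketch of that pattern, with hypothetical names, assuming the updater behaves as the test asserts:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.atomic.AtomicBoolean;

class UpdateScheduler {

    final AtomicBoolean updateQueued = new AtomicBoolean(false);
    private final ExecutorService executor;

    UpdateScheduler(ExecutorService executor) {
        this.executor = executor;
    }

    void onEvent(Runnable update) {
        // Allow at most one queued update at a time.
        if (updateQueued.compareAndSet(false, true)) {
            try {
                executor.submit(() -> {
                    try {
                        update.run();
                    } finally {
                        updateQueued.set(false);
                    }
                });
            } catch (RejectedExecutionException e) {
                // Without this reset, a single rejection would leave the flag
                // stuck at true and silently block all future updates.
                updateQueued.set(false);
            }
        }
    }
}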

Example 58 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project ribbon by Netflix.

From the class PrimeConnections, method primeConnectionsAsync.

/*
    private void makeConnectionsASync() {
        Callable<Void> ft = new Callable<Void>() {
            public Void call() throws Exception {
                logger.info("primeConnections ...");
                makeConnections();
                return null;
            }
        };
        outerExecutorService.submit(ft);
    }
    */
/**
     * Prime connections to the given servers asynchronously.
     *
     * @param servers  the servers whose connections should be primed
     * @param listener callback notified as each server's priming attempt completes
     * @return futures for the submitted priming tasks; empty if there was nothing to prime
     */
public List<Future<Boolean>> primeConnectionsAsync(final List<Server> servers, final PrimeConnectionListener listener) {
    if (servers == null) {
        return Collections.emptyList();
    }
    List<Server> allServers = new ArrayList<Server>();
    allServers.addAll(servers);
    if (allServers.size() == 0) {
        logger.debug("RestClient:" + name + ". No nodes/servers to prime connections");
        return Collections.emptyList();
    }
    logger.info("Priming Connections for RestClient:" + name + ", numServers:" + allServers.size());
    List<Future<Boolean>> ftList = new ArrayList<Future<Boolean>>();
    for (Server s : allServers) {
        // Prevent the load balancer from using this server;
        // readyToServe will be set back to true when priming is done.
        s.setReadyToServe(false);
        if (aSync) {
            Future<Boolean> ftC = null;
            try {
                ftC = makeConnectionASync(s, listener);
                ftList.add(ftC);
            } catch (RejectedExecutionException ree) {
                logger.error("executor submit failed", ree);
            } catch (Exception e) {
                // It does not really matter if there was an exception: the goal
                // here is to attempt "priming/opening" the route (e.g. in EC2);
                // the actual HTTP results do not matter.
                logger.error("general error", e);
            }
        } else {
            connectToServer(s, listener);
        }
    }
    return ftList;
}
Also used: Server(com.netflix.loadbalancer.Server) ArrayList(java.util.ArrayList) Future(java.util.concurrent.Future) RejectedExecutionException(java.util.concurrent.RejectedExecutionException)
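The loop above shows a best-effort batch-submission pattern: a RejectedExecutionException from a saturated or shut-down executor is logged and the iteration simply moves on, since one failed priming attempt should not abort the rest. A condensed, hypothetical sketch of the same pattern, not part of ribbon:

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.RejectedExecutionException;

final class BestEffortSubmitter {

    static <T> List<Future<T>> submitAll(ExecutorService executor, List<Callable<T>> tasks) {
        List<Future<T>> futures = new ArrayList<>();
        for (Callable<T> task : tasks) {
            try {
                futures.add(executor.submit(task));
            } catch (RejectedExecutionException ree) {
                // The executor is saturated or shut down: skip this task and
                // continue with the rest, since the work is best-effort.
                System.err.println("executor submit failed: " + ree);
            }
        }
        return futures;
    }
}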

Example 59 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project druid by druid-io.

From the class KafkaIndexTask, method run.

@Override
public TaskStatus run(final TaskToolbox toolbox) throws Exception {
    log.info("Starting up!");
    startTime = DateTime.now();
    mapper = toolbox.getObjectMapper();
    status = Status.STARTING;
    if (chatHandlerProvider.isPresent()) {
        log.info("Found chat handler of class[%s]", chatHandlerProvider.get().getClass().getName());
        chatHandlerProvider.get().register(getId(), this, false);
    } else {
        log.warn("No chat handler detected");
    }
    runThread = Thread.currentThread();
    // Set up FireDepartmentMetrics
    final FireDepartment fireDepartmentForMetrics = new FireDepartment(dataSchema, new RealtimeIOConfig(null, null, null), null);
    fireDepartmentMetrics = fireDepartmentForMetrics.getMetrics();
    toolbox.getMonitorScheduler().addMonitor(new RealtimeMetricsMonitor(ImmutableList.of(fireDepartmentForMetrics), ImmutableMap.of(DruidMetrics.TASK_ID, new String[] { getId() })));
    try (final Appenderator appenderator0 = newAppenderator(fireDepartmentMetrics, toolbox);
        final FiniteAppenderatorDriver driver = newDriver(appenderator0, toolbox, fireDepartmentMetrics);
        final KafkaConsumer<byte[], byte[]> consumer = newConsumer()) {
        appenderator = appenderator0;
        final String topic = ioConfig.getStartPartitions().getTopic();
        // Start up, set up initial offsets.
        final Object restoredMetadata = driver.startJob();
        if (restoredMetadata == null) {
            nextOffsets.putAll(ioConfig.getStartPartitions().getPartitionOffsetMap());
        } else {
            final Map<String, Object> restoredMetadataMap = (Map) restoredMetadata;
            final KafkaPartitions restoredNextPartitions = toolbox.getObjectMapper().convertValue(restoredMetadataMap.get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
            nextOffsets.putAll(restoredNextPartitions.getPartitionOffsetMap());
            // Sanity checks.
            if (!restoredNextPartitions.getTopic().equals(ioConfig.getStartPartitions().getTopic())) {
                throw new ISE("WTF?! Restored topic[%s] but expected topic[%s]", restoredNextPartitions.getTopic(), ioConfig.getStartPartitions().getTopic());
            }
            if (!nextOffsets.keySet().equals(ioConfig.getStartPartitions().getPartitionOffsetMap().keySet())) {
                throw new ISE("WTF?! Restored partitions[%s] but expected partitions[%s]", nextOffsets.keySet(), ioConfig.getStartPartitions().getPartitionOffsetMap().keySet());
            }
        }
        // Set up sequenceNames.
        final Map<Integer, String> sequenceNames = Maps.newHashMap();
        for (Integer partitionNum : nextOffsets.keySet()) {
            sequenceNames.put(partitionNum, String.format("%s_%s", ioConfig.getBaseSequenceName(), partitionNum));
        }
        // Set up committer.
        final Supplier<Committer> committerSupplier = new Supplier<Committer>() {

            @Override
            public Committer get() {
                final Map<Integer, Long> snapshot = ImmutableMap.copyOf(nextOffsets);
                return new Committer() {

                    @Override
                    public Object getMetadata() {
                        return ImmutableMap.of(METADATA_NEXT_PARTITIONS, new KafkaPartitions(ioConfig.getStartPartitions().getTopic(), snapshot));
                    }

                    @Override
                    public void run() {
                    // Do nothing.
                    }
                };
            }
        };
        Set<Integer> assignment = assignPartitionsAndSeekToNext(consumer, topic);
        // Main loop.
        // Could eventually support leader/follower mode (for keeping replicas more in sync)
        boolean stillReading = !assignment.isEmpty();
        status = Status.READING;
        try {
            while (stillReading) {
                if (possiblyPause(assignment)) {
                    // The partition assignments may have changed while paused by a call to setEndOffsets() so reassign
                    // partitions upon resuming. This is safe even if the end offsets have not been modified.
                    assignment = assignPartitionsAndSeekToNext(consumer, topic);
                    if (assignment.isEmpty()) {
                        log.info("All partitions have been fully read");
                        publishOnStop = true;
                        stopRequested = true;
                    }
                }
                if (stopRequested) {
                    break;
                }
                // The retrying business is because the KafkaConsumer throws OffsetOutOfRangeException if the seeked-to
                // offset is not present in the topic-partition. This can happen if we're asking a task to read from data
                // that has not been written yet (which is totally legitimate). So let's wait for it to show up.
                ConsumerRecords<byte[], byte[]> records = ConsumerRecords.empty();
                try {
                    records = consumer.poll(POLL_TIMEOUT);
                } catch (OffsetOutOfRangeException e) {
                    log.warn("OffsetOutOfRangeException with message [%s]", e.getMessage());
                    possiblyResetOffsetsOrWait(e.offsetOutOfRangePartitions(), consumer, toolbox);
                    stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
                }
                for (ConsumerRecord<byte[], byte[]> record : records) {
                    if (log.isTraceEnabled()) {
                        log.trace("Got topic[%s] partition[%d] offset[%,d].", record.topic(), record.partition(), record.offset());
                    }
                    if (record.offset() < endOffsets.get(record.partition())) {
                        if (record.offset() != nextOffsets.get(record.partition())) {
                            throw new ISE("WTF?! Got offset[%,d] after offset[%,d] in partition[%d].", record.offset(), nextOffsets.get(record.partition()), record.partition());
                        }
                        try {
                            final byte[] valueBytes = record.value();
                            if (valueBytes == null) {
                                throw new ParseException("null value");
                            }
                            final InputRow row = Preconditions.checkNotNull(parser.parse(ByteBuffer.wrap(valueBytes)), "row");
                            if (!ioConfig.getMinimumMessageTime().isPresent() || !ioConfig.getMinimumMessageTime().get().isAfter(row.getTimestamp())) {
                                final SegmentIdentifier identifier = driver.add(row, sequenceNames.get(record.partition()), committerSupplier);
                                if (identifier == null) {
                                    // If we allow continuing, then consider blacklisting the interval for a while to avoid constant checks.
                                    throw new ISE("Could not allocate segment for row with timestamp[%s]", row.getTimestamp());
                                }
                                fireDepartmentMetrics.incrementProcessed();
                            } else {
                                fireDepartmentMetrics.incrementThrownAway();
                            }
                        } catch (ParseException e) {
                            if (tuningConfig.isReportParseExceptions()) {
                                throw e;
                            } else {
                                log.debug(e, "Dropping unparseable row from partition[%d] offset[%,d].", record.partition(), record.offset());
                                fireDepartmentMetrics.incrementUnparseable();
                            }
                        }
                        nextOffsets.put(record.partition(), record.offset() + 1);
                    }
                    if (nextOffsets.get(record.partition()).equals(endOffsets.get(record.partition())) && assignment.remove(record.partition())) {
                        log.info("Finished reading topic[%s], partition[%,d].", record.topic(), record.partition());
                        assignPartitions(consumer, topic, assignment);
                        stillReading = ioConfig.isPauseAfterRead() || !assignment.isEmpty();
                    }
                }
            }
        } finally {
            // persist pending data
            driver.persist(committerSupplier.get());
        }
        synchronized (statusLock) {
            if (stopRequested && !publishOnStop) {
                throw new InterruptedException("Stopping without publishing");
            }
            status = Status.PUBLISHING;
        }
        final TransactionalSegmentPublisher publisher = new TransactionalSegmentPublisher() {

            @Override
            public boolean publishSegments(Set<DataSegment> segments, Object commitMetadata) throws IOException {
                final KafkaPartitions finalPartitions = toolbox.getObjectMapper().convertValue(((Map) commitMetadata).get(METADATA_NEXT_PARTITIONS), KafkaPartitions.class);
                // Sanity check, we should only be publishing things that match our desired end state.
                if (!endOffsets.equals(finalPartitions.getPartitionOffsetMap())) {
                    throw new ISE("WTF?! Driver attempted to publish invalid metadata[%s].", commitMetadata);
                }
                final SegmentTransactionalInsertAction action;
                if (ioConfig.isUseTransaction()) {
                    action = new SegmentTransactionalInsertAction(segments, new KafkaDataSourceMetadata(ioConfig.getStartPartitions()), new KafkaDataSourceMetadata(finalPartitions));
                } else {
                    action = new SegmentTransactionalInsertAction(segments, null, null);
                }
                log.info("Publishing with isTransaction[%s].", ioConfig.isUseTransaction());
                return toolbox.getTaskActionClient().submit(action).isSuccess();
            }
        };
        final SegmentsAndMetadata published = driver.finish(publisher, committerSupplier.get());
        if (published == null) {
            throw new ISE("Transaction failure publishing segments, aborting");
        } else {
            log.info("Published segments[%s] with metadata[%s].", Joiner.on(", ").join(Iterables.transform(published.getSegments(), new Function<DataSegment, String>() {

                @Override
                public String apply(DataSegment input) {
                    return input.getIdentifier();
                }
            })), published.getCommitMetadata());
        }
    } catch (InterruptedException | RejectedExecutionException e) {
        // handle the InterruptedException that gets wrapped in a RejectedExecutionException
        if (e instanceof RejectedExecutionException && (e.getCause() == null || !(e.getCause() instanceof InterruptedException))) {
            throw e;
        }
        // if we were interrupted because we were asked to stop, handle the exception and return success, else rethrow
        if (!stopRequested) {
            Thread.currentThread().interrupt();
            throw e;
        }
        log.info("The task was asked to stop before completing");
    } finally {
        if (chatHandlerProvider.isPresent()) {
            chatHandlerProvider.get().unregister(getId());
        }
    }
    return success();
}
Also used: RealtimeIOConfig(io.druid.segment.indexing.RealtimeIOConfig) Set(java.util.Set) SegmentIdentifier(io.druid.segment.realtime.appenderator.SegmentIdentifier) SegmentTransactionalInsertAction(io.druid.indexing.common.actions.SegmentTransactionalInsertAction) DataSegment(io.druid.timeline.DataSegment) FireDepartment(io.druid.segment.realtime.FireDepartment) TransactionalSegmentPublisher(io.druid.segment.realtime.appenderator.TransactionalSegmentPublisher) ISE(io.druid.java.util.common.ISE) Supplier(com.google.common.base.Supplier) SegmentsAndMetadata(io.druid.segment.realtime.appenderator.SegmentsAndMetadata) RejectedExecutionException(java.util.concurrent.RejectedExecutionException) Appenderator(io.druid.segment.realtime.appenderator.Appenderator) FiniteAppenderatorDriver(io.druid.segment.realtime.appenderator.FiniteAppenderatorDriver) InputRow(io.druid.data.input.InputRow) RealtimeMetricsMonitor(io.druid.segment.realtime.RealtimeMetricsMonitor) Committer(io.druid.data.input.Committer) ParseException(io.druid.java.util.common.parsers.ParseException) OffsetOutOfRangeException(org.apache.kafka.clients.consumer.OffsetOutOfRangeException) Map(java.util.Map) ImmutableMap(com.google.common.collect.ImmutableMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap)
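The catch block near the end is the part worth isolating: an InterruptedException can arrive wrapped in a RejectedExecutionException, so the task rethrows only genuine rejections, re-asserts the interrupt flag for unexpected interrupts, and returns quietly only when the interrupt was a requested stop. A hypothetical distillation of that idiom, with illustrative names:

import java.util.concurrent.RejectedExecutionException;

class StopAwareWorker {

    private volatile boolean stopRequested;

    void runLoop() throws Exception {
        try {
            doWork();
        } catch (InterruptedException | RejectedExecutionException e) {
            boolean wrapsInterrupt = e instanceof RejectedExecutionException
                    && e.getCause() instanceof InterruptedException;
            if (e instanceof RejectedExecutionException && !wrapsInterrupt) {
                // A genuine rejection, not an interrupt in disguise: rethrow.
                throw e;
            }
            if (!stopRequested) {
                // Interrupted for some other reason: restore the flag and fail.
                Thread.currentThread().interrupt();
                throw e;
            }
            // Interrupted because a stop was requested: exit gracefully.
        }
    }

    private void doWork() throws InterruptedException {
        // Work that may be interrupted directly, or that hands off to an
        // executor which can wrap an interrupt in a RejectedExecutionException.
    }
}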

Example 60 with RejectedExecutionException

Use of java.util.concurrent.RejectedExecutionException in project che by eclipse.

From the class CronThreadPoolExecutor, method schedule.

@Override
public Future<?> schedule(final Runnable task, final CronExpression expression) {
    if (task == null) {
        throw new NullPointerException();
    }
    setCorePoolSize(getCorePoolSize() + 1);
    Runnable scheduleTask = new Runnable() {

        @Override
        public void run() {
            CountDownLatch countDownLatch = new CountDownLatch(1);
            cronJobWatchDogs.add(countDownLatch);
            Date now = new Date();
            Date time = expression.getNextValidTimeAfter(now);
            try {
                while (time != null) {
                    CronThreadPoolExecutor.this.schedule(task, time.getTime() - now.getTime(), TimeUnit.MILLISECONDS);
                    while (now.before(time)) {
                        LOG.debug("Cron watch dog wait {} ", time.getTime() - now.getTime());
                        if (countDownLatch.await(time.getTime() - now.getTime(), TimeUnit.MILLISECONDS)) {
                            LOG.debug("Stopping cron watch dog.");
                            return;
                        }
                        now = new Date();
                    }
                    time = expression.getNextValidTimeAfter(now);
                }
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();
            } catch (RejectedExecutionException | CancellationException e) {
                LOG.error(e.getMessage(), e);
            }
        }
    };
    return this.submit(scheduleTask);
}
Also used: CancellationException(java.util.concurrent.CancellationException) CountDownLatch(java.util.concurrent.CountDownLatch) Date(java.util.Date) RejectedExecutionException(java.util.concurrent.RejectedExecutionException)
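Stripped of the watchdog latch, the heart of the method above is a self-rescheduling loop: compute the delay to the next valid cron fire time, schedule a one-shot task, and let the task re-arm itself after each run. A minimal sketch under that reading, where nextFireTime is a hypothetical stand-in for CronExpression.getNextValidTimeAfter():

import java.util.Date;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;

class SelfReschedulingTask implements Runnable {

    private final ScheduledExecutorService scheduler;
    private final Function<Date, Date> nextFireTime; // cron abstraction
    private final Runnable task;

    SelfReschedulingTask(ScheduledExecutorService scheduler,
                         Function<Date, Date> nextFireTime,
                         Runnable task) {
        this.scheduler = scheduler;
        this.nextFireTime = nextFireTime;
        this.task = task;
    }

    /** Arm the first execution. */
    void start() {
        scheduleNext();
    }

    private void scheduleNext() {
        Date now = new Date();
        Date next = nextFireTime.apply(now);
        if (next != null) {
            // One-shot schedule; the task re-arms itself after each run.
            scheduler.schedule(this, next.getTime() - now.getTime(), TimeUnit.MILLISECONDS);
        }
    }

    @Override
    public void run() {
        try {
            task.run();
        } finally {
            scheduleNext();
        }
    }
}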

Aggregations

RejectedExecutionException (java.util.concurrent.RejectedExecutionException): 246
ExecutorService (java.util.concurrent.ExecutorService): 42
IOException (java.io.IOException): 34
Test (org.junit.Test): 34
Future (java.util.concurrent.Future): 19
ArrayList (java.util.ArrayList): 18
Executor (java.util.concurrent.Executor): 18
ExecutionException (java.util.concurrent.ExecutionException): 15
ScheduledExecutorService (java.util.concurrent.ScheduledExecutorService): 15
ThreadPoolExecutor (java.util.concurrent.ThreadPoolExecutor): 15
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 14
List (java.util.List): 11
TaskRejectedException (org.springframework.core.task.TaskRejectedException): 11
BitmapDrawable (android.graphics.drawable.BitmapDrawable): 10
Animation (android.view.animation.Animation): 10
Map (java.util.Map): 10
CancellationException (java.util.concurrent.CancellationException): 10
CacheableBitmapDrawable (uk.co.senab.bitmapcache.CacheableBitmapDrawable): 10
ParallelTest (com.hazelcast.test.annotation.ParallelTest): 9
QuickTest (com.hazelcast.test.annotation.QuickTest): 9