Use of org.apache.camel.spi.RecoverableAggregationRepository in project camel by apache.
The class AggregateProcessor, method doStart:
@Override
protected void doStart() throws Exception {
    AggregationStrategy strategy = aggregationStrategy;
    if (strategy instanceof DelegateAggregationStrategy) {
        strategy = ((DelegateAggregationStrategy) strategy).getDelegate();
    }
    if (strategy instanceof PreCompletionAwareAggregationStrategy) {
        preCompletion = true;
        LOG.info("PreCompletionAwareAggregationStrategy detected. Aggregator {} is in pre-completion mode.", getId());
    }

    if (!preCompletion) {
        // if not in pre-completion mode then check that at least one completion option is configured
        if (getCompletionTimeout() <= 0 && getCompletionInterval() <= 0 && getCompletionSize() <= 0
                && getCompletionPredicate() == null && !isCompletionFromBatchConsumer()
                && getCompletionTimeoutExpression() == null && getCompletionSizeExpression() == null) {
            throw new IllegalStateException("At least one of the completions options"
                    + " [completionTimeout, completionInterval, completionSize, completionPredicate, completionFromBatchConsumer] must be set");
        }
    }

    if (getCloseCorrelationKeyOnCompletion() != null) {
        if (getCloseCorrelationKeyOnCompletion() > 0) {
            LOG.info("Using ClosedCorrelationKeys with a LRUCache with a capacity of " + getCloseCorrelationKeyOnCompletion());
            closedCorrelationKeys = new LRUCache<String, String>(getCloseCorrelationKeyOnCompletion());
        } else {
            LOG.info("Using ClosedCorrelationKeys with unbounded capacity");
            closedCorrelationKeys = new ConcurrentHashMap<String, String>();
        }
    }

    if (aggregationRepository == null) {
        aggregationRepository = new MemoryAggregationRepository(optimisticLocking);
        LOG.info("Defaulting to MemoryAggregationRepository");
    }

    if (optimisticLocking) {
        if (!(aggregationRepository instanceof OptimisticLockingAggregationRepository)) {
            throw new IllegalArgumentException("Optimistic locking cannot be enabled without using an AggregationRepository that implements OptimisticLockingAggregationRepository");
        }
        LOG.info("Optimistic locking is enabled");
    }

    ServiceHelper.startServices(aggregationStrategy, processor, aggregationRepository);

    // should we use the recovery checker
    if (aggregationRepository instanceof RecoverableAggregationRepository) {
        RecoverableAggregationRepository recoverable = (RecoverableAggregationRepository) aggregationRepository;
        if (recoverable.isUseRecovery()) {
            long interval = recoverable.getRecoveryIntervalInMillis();
            if (interval <= 0) {
                throw new IllegalArgumentException("AggregationRepository has recovery enabled and the RecoveryInterval option must be a positive number, was: " + interval);
            }

            // create a background recovery thread to check every interval
            recoverService = camelContext.getExecutorServiceManager().newScheduledThreadPool(this, "AggregateRecoverChecker", 1);
            Runnable recoverTask = new RecoverTask(recoverable);
            LOG.info("Using RecoverableAggregationRepository by scheduling recover checker to run every " + interval + " millis.");
            // use fixed delay so there is X interval between each run
            recoverService.scheduleWithFixedDelay(recoverTask, 1000L, interval, TimeUnit.MILLISECONDS);

            if (recoverable.getDeadLetterUri() != null) {
                int max = recoverable.getMaximumRedeliveries();
                if (max <= 0) {
                    throw new IllegalArgumentException("Option maximumRedeliveries must be a positive number, was: " + max);
                }
                LOG.info("After " + max + " failed redelivery attempts Exchanges will be moved to deadLetterUri: " + recoverable.getDeadLetterUri());

                // dead letter uri must be a valid endpoint
                Endpoint endpoint = camelContext.getEndpoint(recoverable.getDeadLetterUri());
                if (endpoint == null) {
                    throw new NoSuchEndpointException(recoverable.getDeadLetterUri());
                }
                deadLetterProducerTemplate = camelContext.createProducerTemplate();
            }
        }
    }

    if (getCompletionInterval() > 0 && getCompletionTimeout() > 0) {
        throw new IllegalArgumentException("Only one of completionInterval or completionTimeout can be used, not both.");
    }
    if (getCompletionInterval() > 0) {
        LOG.info("Using CompletionInterval to run every " + getCompletionInterval() + " millis.");
        if (getTimeoutCheckerExecutorService() == null) {
            setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
            shutdownTimeoutCheckerExecutorService = true;
        }
        // trigger completion based on interval
        getTimeoutCheckerExecutorService().scheduleAtFixedRate(new AggregationIntervalTask(), getCompletionInterval(), getCompletionInterval(), TimeUnit.MILLISECONDS);
    }

    // start the timeout service if it is in use
    if (getCompletionTimeout() > 0 || getCompletionTimeoutExpression() != null) {
        LOG.info("Using CompletionTimeout to trigger after " + getCompletionTimeout() + " millis of inactivity.");
        if (getTimeoutCheckerExecutorService() == null) {
            setTimeoutCheckerExecutorService(camelContext.getExecutorServiceManager().newScheduledThreadPool(this, AGGREGATE_TIMEOUT_CHECKER, 1));
            shutdownTimeoutCheckerExecutorService = true;
        }
        // check for timed out aggregated messages once every second
        timeoutMap = new AggregationTimeoutMap(getTimeoutCheckerExecutorService(), 1000L);
        // fill in existing timeout values from the aggregation repository, for example after a restart,
        // so the timeout map is re-established and timeouts can trigger
        restoreTimeoutMapFromAggregationRepository();
        ServiceHelper.startService(timeoutMap);
    }

    if (aggregateController == null) {
        aggregateController = new DefaultAggregateController();
    }
    aggregateController.onStart(this);
}
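The options validated above are configured on the repository and on the aggregate definition before the route is started. The following sketch is not part of the Camel sources; it shows one way such a configuration could look, assuming the LevelDB-based repository from camel-leveldb is on the classpath. The repository name, file path, endpoint URIs and the AppendBodyAggregationStrategy (sketched after the doAggregation listing below) are made up for illustration.

import org.apache.camel.builder.RouteBuilder;
import org.apache.camel.component.leveldb.LevelDBAggregationRepository;

public class RecoverableAggregateRoute extends RouteBuilder {
    @Override
    public void configure() throws Exception {
        // persistent repository implementing RecoverableAggregationRepository (assumes camel-leveldb)
        LevelDBAggregationRepository repo = new LevelDBAggregationRepository("orders", "target/data/orders.dat");
        repo.setUseRecovery(true);       // doStart() only schedules the recovery checker when this is true
        repo.setRecoveryInterval(5000);  // must be > 0, otherwise doStart() throws IllegalArgumentException
        repo.setDeadLetterUri("direct:aggregateFailed");
        repo.setMaximumRedeliveries(3);  // must be > 0 whenever a deadLetterUri is configured

        from("direct:start")
            // at least one completion option must be set; completionInterval and
            // completionTimeout cannot be combined
            .aggregate(header("orderId"), new AppendBodyAggregationStrategy()) // hypothetical strategy, see below
                .aggregationRepository(repo)
                .completionSize(10)
                .completionTimeout(5000)
            .to("mock:result");

        from("direct:aggregateFailed")
            .log("Recovery exhausted for exchange ${exchangeId}");
    }
}

With such a configuration, doStart() would schedule the recover checker every 5 seconds and, after three failed redelivery attempts, move an exchange to direct:aggregateFailed.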
Use of org.apache.camel.spi.RecoverableAggregationRepository in project camel by apache.
The class AggregateProcessor, method doAggregation:
/**
 * Aggregates the exchange with the given correlation key
 * <p/>
 * This method <b>must</b> be run synchronized as we cannot aggregate the same correlation key
 * in parallel.
 * <p/>
 * The returned {@link Exchange} should be sent downstream using the {@link #onSubmitCompletion(String, org.apache.camel.Exchange)}
 * method which sends out the aggregated and completed {@link Exchange}.
 *
 * @param key the correlation key
 * @param newExchange the exchange
 * @return the aggregated exchange(s) which are complete, or <tt>null</tt> if not yet complete
 * @throws org.apache.camel.CamelExchangeException is thrown if an error occurs during aggregation
 */
private List<Exchange> doAggregation(String key, Exchange newExchange) throws CamelExchangeException {
    LOG.trace("onAggregation +++ start +++ with correlation key: {}", key);

    List<Exchange> list = new ArrayList<Exchange>();
    String complete = null;

    Exchange answer;
    Exchange originalExchange = aggregationRepository.get(newExchange.getContext(), key);
    Exchange oldExchange = originalExchange;

    Integer size = 1;
    if (oldExchange != null) {
        // legacy AggregationStrategy implementations may modify and return the oldExchange, which does not
        // work with an identity-based approach for optimistic locking such as the MemoryAggregationRepository,
        // so work on a copy instead
        if (optimisticLocking && aggregationRepository instanceof MemoryAggregationRepository) {
            oldExchange = originalExchange.copy();
        }
        size = oldExchange.getProperty(Exchange.AGGREGATED_SIZE, 0, Integer.class);
        size++;
    }

    // prepare the exchanges for aggregation
    ExchangeHelper.prepareAggregation(oldExchange, newExchange);

    // check if we are pre complete
    if (preCompletion) {
        try {
            // put the current aggregated size on the exchange so it is available during the completion check
            newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
            complete = isPreCompleted(key, oldExchange, newExchange);
            // make sure to track timeouts if not complete
            if (complete == null) {
                trackTimeout(key, newExchange);
            }
            // remove it afterwards
            newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
        } catch (Throwable e) {
            // must catch any exception from aggregation
            throw new CamelExchangeException("Error occurred during preComplete", newExchange, e);
        }
    } else if (isEagerCheckCompletion()) {
        // put the current aggregated size on the exchange so it is available during the completion check
        newExchange.setProperty(Exchange.AGGREGATED_SIZE, size);
        complete = isCompleted(key, newExchange);
        // make sure to track timeouts if not complete
        if (complete == null) {
            trackTimeout(key, newExchange);
        }
        // remove it afterwards
        newExchange.removeProperty(Exchange.AGGREGATED_SIZE);
    }

    if (preCompletion && complete != null) {
        // need to pre complete the current group before we aggregate
        doAggregationComplete(complete, list, key, originalExchange, oldExchange);
        // as we complete the current group eagerly, we should indicate the new group is not complete
        complete = null;
        // and clear old/original exchange as we start on a new group
        oldExchange = null;
        originalExchange = null;
        // and reset the size to 1
        size = 1;
        // make sure to track timeout as we just restart the correlation group when we are in pre-completion mode
        trackTimeout(key, newExchange);
    }

    // aggregate the exchanges
    try {
        answer = onAggregation(oldExchange, newExchange);
    } catch (Throwable e) {
        // must catch any exception from aggregation
        throw new CamelExchangeException("Error occurred during aggregation", newExchange, e);
    }
    if (answer == null) {
        throw new CamelExchangeException("AggregationStrategy " + aggregationStrategy + " returned null which is not allowed", newExchange);
    }

    // special for some repository implementations
    if (aggregationRepository instanceof RecoverableAggregationRepository) {
        boolean valid = oldExchange == null || answer.getExchangeId().equals(oldExchange.getExchangeId());
        if (!valid && aggregateRepositoryWarned.compareAndSet(false, true)) {
            LOG.warn("AggregationStrategy should return the oldExchange instance instead of the newExchange whenever possible"
                    + " as otherwise this can lead to unexpected behavior with some RecoverableAggregationRepository implementations");
        }
    }

    // update the aggregated size
    answer.setProperty(Exchange.AGGREGATED_SIZE, size);

    // maybe we should check completion after the aggregation
    if (!preCompletion && !isEagerCheckCompletion()) {
        complete = isCompleted(key, answer);
        // make sure to track timeouts if not complete
        if (complete == null) {
            trackTimeout(key, newExchange);
        }
    }

    if (complete == null) {
        // only need to update the aggregation repository if we are not complete
        doAggregationRepositoryAdd(newExchange.getContext(), key, originalExchange, answer);
    } else {
        // if we are complete then add the answer to the list
        doAggregationComplete(complete, list, key, originalExchange, answer);
    }

    LOG.trace("onAggregation +++ end +++ with correlation key: {}", key);
    return list;
}
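The warning logged above can be avoided by writing the strategy so that it mutates and returns the exchange instance already held by the repository, keeping the exchange id stable. A minimal sketch follows; the class name and the String-body handling are illustrative and not taken from the Camel sources.

import org.apache.camel.Exchange;
import org.apache.camel.processor.aggregate.AggregationStrategy;

public class AppendBodyAggregationStrategy implements AggregationStrategy {
    @Override
    public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
        if (oldExchange == null) {
            // first exchange for this correlation key
            return newExchange;
        }
        String existing = oldExchange.getIn().getBody(String.class);
        String incoming = newExchange.getIn().getBody(String.class);
        oldExchange.getIn().setBody(existing + "," + incoming);
        // returning the oldExchange keeps answer.getExchangeId() equal to
        // oldExchange.getExchangeId(), so the validity check above passes
        return oldExchange;
    }
}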