Use of org.apache.storm.topology.FailedException in project storm by apache.
Class TransactionalSpoutBatchExecutor, method execute().
@Override
public void execute(Tuple input) {
    TransactionAttempt attempt = (TransactionAttempt) input.getValue(0);
    try {
        if (input.getSourceStreamId().equals(TransactionalSpoutCoordinator.TRANSACTION_COMMIT_STREAM_ID)) {
            if (attempt.equals(_activeTransactions.get(attempt.getTransactionId()))) {
                ((ICommitterTransactionalSpout.Emitter) _emitter).commit(attempt);
                _activeTransactions.remove(attempt.getTransactionId());
                _collector.ack(input);
            } else {
                _collector.fail(input);
            }
        } else {
            _emitter.emitBatch(attempt, input.getValue(1), _collector);
            _activeTransactions.put(attempt.getTransactionId(), attempt);
            _collector.ack(input);
            BigInteger committed = (BigInteger) input.getValue(2);
            if (committed != null) {
                // valid to delete before what's been committed since
                // those batches will never be accessed again
                _activeTransactions.headMap(committed).clear();
                _emitter.cleanupBefore(committed);
            }
        }
    } catch (FailedException e) {
        LOG.warn("Failed to emit batch for transaction", e);
        _collector.fail(input);
    }
}
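Note the executor does not throw FailedException itself; it catches one raised by the user-supplied emitter and turns it into a fail of the input tuple, so the coordinator replays the batch. Below is a minimal sketch of an emitter that raises it, written against the legacy ITransactionalSpout API; the class name FlakySourceEmitter and its source logic are hypothetical, not Storm code.

import java.math.BigInteger;
import org.apache.storm.coordination.BatchOutputCollector;
import org.apache.storm.topology.FailedException;
import org.apache.storm.transactional.ITransactionalSpout;
import org.apache.storm.transactional.TransactionAttempt;
import org.apache.storm.tuple.Values;

public class FlakySourceEmitter implements ITransactionalSpout.Emitter<BigInteger> {

    @Override
    public void emitBatch(TransactionAttempt tx, BigInteger coordinatorMeta, BatchOutputCollector collector) {
        try {
            // read the records for this batch from the external source ...
            collector.emit(new Values(tx, "record")); // first field must be the attempt
        } catch (RuntimeException e) {
            // transient source error: the FailedException reaches the catch block
            // in TransactionalSpoutBatchExecutor.execute above, which fails the
            // tuple so the batch is replayed instead of crashing the worker
            throw new FailedException(e);
        }
    }

    @Override
    public void cleanupBefore(BigInteger txid) {
        // drop any per-batch bookkeeping older than txid
    }

    @Override
    public void close() {
    }
}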
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class TransactionalSpoutCoordinator, method sync().
private void sync() {
    // note that sometimes the tuples active may be less than max_spout_pending, e.g.
    // max_spout_pending = 3
    // tx 1, 2, 3 active, tx 2 is acked. there won't be a commit for tx 2 (because tx 1 isn't committed yet),
    // and there won't be a batch for tx 4 because there's max_spout_pending tx active
    TransactionStatus maybeCommit = _activeTx.get(_currTransaction);
    if (maybeCommit != null && maybeCommit.status == AttemptStatus.PROCESSED) {
        maybeCommit.status = AttemptStatus.COMMITTING;
        _collector.emit(TRANSACTION_COMMIT_STREAM_ID, new Values(maybeCommit.attempt), maybeCommit.attempt);
    }
    try {
        if (_activeTx.size() < _maxTransactionActive) {
            BigInteger curr = _currTransaction;
            for (int i = 0; i < _maxTransactionActive; i++) {
                if ((_coordinatorState.hasCache(curr) || _coordinator.isReady()) && !_activeTx.containsKey(curr)) {
                    TransactionAttempt attempt = new TransactionAttempt(curr, _rand.nextLong());
                    Object state = _coordinatorState.getState(curr, _initializer);
                    _activeTx.put(curr, new TransactionStatus(attempt));
                    _collector.emit(TRANSACTION_BATCH_STREAM_ID, new Values(attempt, state, previousTransactionId(_currTransaction)), attempt);
                }
                curr = nextTransactionId(curr);
            }
        }
    } catch (FailedException e) {
        LOG.warn("Failed to get metadata for a transaction", e);
    }
}
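The FailedException caught here typically originates in the user-supplied coordinator: _coordinatorState.getState(curr, _initializer) calls back into ITransactionalSpout.Coordinator.initializeTransaction, so throwing from there makes sync() log a warning and try again on its next invocation instead of crashing the spout. A minimal sketch follows; the class OffsetRangeCoordinator and its offset-range metadata scheme are hypothetical.

import java.math.BigInteger;
import org.apache.storm.topology.FailedException;
import org.apache.storm.transactional.ITransactionalSpout;

public class OffsetRangeCoordinator implements ITransactionalSpout.Coordinator<Long> {

    @Override
    public Long initializeTransaction(BigInteger txid, Long prevMetadata) {
        try {
            // compute the next batch boundary from the previous one ...
            return (prevMetadata == null ? 0L : prevMetadata) + 1000L;
        } catch (RuntimeException e) {
            // metadata lookup failed: sync() above logs a warning and retries later
            throw new FailedException(e);
        }
    }

    @Override
    public boolean isReady() {
        return true; // gate batch emission here, e.g. on source availability
    }

    @Override
    public void close() {
    }
}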
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class MongoState, method batchRetrieve().
/**
 * Batch retrieve values.
 * @param tridentTuples trident tuples
 * @return values
 */
public List<List<Values>> batchRetrieve(List<TridentTuple> tridentTuples) {
    List<List<Values>> batchRetrieveResult = Lists.newArrayList();
    try {
        for (TridentTuple tuple : tridentTuples) {
            Bson filter = options.queryCreator.createFilter(tuple);
            Document doc = mongoClient.find(filter);
            List<Values> values = options.lookupMapper.toTuple(tuple, doc);
            batchRetrieveResult.add(values);
        }
    } catch (Exception e) {
        LOG.warn("Batch get operation failed. Triggering replay.", e);
        throw new FailedException(e);
    }
    return batchRetrieveResult;
}
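Wrapping the original exception in FailedException is the standard way for Trident state code to request a batch replay: the exception propagates to TridentBoltExecutor (see finishBatch below), which fails the batch instead of crashing the worker. The catch-and-wrap pattern is not Mongo-specific; here is a minimal sketch for a generic State, where the class KeyValueLookupState and its store are hypothetical.

import java.util.ArrayList;
import java.util.List;
import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.state.State;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

public class KeyValueLookupState implements State {

    @Override
    public void beginCommit(Long txid) {
    }

    @Override
    public void commit(Long txid) {
    }

    public List<List<Values>> batchRetrieve(List<TridentTuple> tuples) {
        List<List<Values>> result = new ArrayList<>();
        try {
            for (TridentTuple tuple : tuples) {
                // look up tuple.getString(0) in the external store ...
                result.add(new ArrayList<Values>());
            }
        } catch (RuntimeException e) {
            // replay the whole batch rather than return partial results
            throw new FailedException(e);
        }
        return result;
    }
}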
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class MongoMapState, method multiPut().
@Override
public void multiPut(List<List<Object>> keysList, List<T> values) {
    try {
        for (int i = 0; i < keysList.size(); i++) {
            List<Object> keys = keysList.get(i);
            T value = values.get(i);
            Bson filter = options.queryCreator.createFilterByKeys(keys);
            Document document = options.mapper.toDocumentByKeys(keys);
            document.append(options.serDocumentField, this.serializer.serialize(value));
            this.mongoClient.update(filter, document, true, false);
        }
    } catch (Exception e) {
        LOG.warn("Batch write operation failed.", e);
        throw new FailedException(e);
    }
}
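For map states used with persistentAggregate, a FailedException from multiPut aborts the commit and Trident replays the batch under the same transaction id. The pattern fits any IBackingMap, not just MongoDB; a minimal sketch with an in-memory store standing in for the database follows (the class InMemoryBackingMap is hypothetical).

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.state.map.IBackingMap;

public class InMemoryBackingMap<T> implements IBackingMap<T> {
    private final Map<List<Object>, T> store = new HashMap<>();

    @Override
    public List<T> multiGet(List<List<Object>> keys) {
        List<T> result = new ArrayList<>();
        for (List<Object> key : keys) {
            result.add(store.get(key));
        }
        return result;
    }

    @Override
    public void multiPut(List<List<Object>> keys, List<T> values) {
        try {
            for (int i = 0; i < keys.size(); i++) {
                store.put(keys.get(i), values.get(i)); // a real store could fail here
            }
        } catch (RuntimeException e) {
            // a failed write fails the commit; Trident replays the batch
            throw new FailedException(e);
        }
    }
}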
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class TridentBoltExecutor, method finishBatch().
private boolean finishBatch(TrackedBatch tracked, Tuple finishTuple) {
    boolean success = true;
    try {
        bolt.finishBatch(tracked.info);
        String stream = coordStream(tracked.info.batchGroup);
        for (Integer task : tracked.condition.targetTasks) {
            collector.emitDirect(task, stream, finishTuple, new Values(tracked.info.batchId, Utils.get(tracked.taskEmittedTuples, task, 0)));
        }
        if (tracked.delayedAck != null) {
            collector.ack(tracked.delayedAck);
            tracked.delayedAck = null;
        }
    } catch (FailedException e) {
        failBatch(tracked, e);
        success = false;
    }
    batches.remove(tracked.info.batchId.getId());
    return success;
}
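This is where the exceptions from the snippets above land: bolt.finishBatch drives the state commit, and a FailedException makes the executor fail the whole tracked batch via failBatch. User code can throw either FailedException (fail and replay) or its subclass ReportedFailedException (fail, replay, and additionally surface the error in Storm UI). A minimal sketch of a Trident function using both; the class ValidatingFunction and its validation rules are hypothetical.

import org.apache.storm.topology.FailedException;
import org.apache.storm.topology.ReportedFailedException;
import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

public class ValidatingFunction extends BaseFunction {

    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        String payload = tuple.getString(0);
        if (payload == null) {
            // fail the batch quietly; TridentBoltExecutor catches it and replays
            throw new FailedException("null payload, replaying batch");
        }
        if (payload.isEmpty()) {
            // fail the batch and also report the error to Storm UI
            throw new ReportedFailedException("empty payload");
        }
        collector.emit(new Values(payload.length()));
    }
}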