Use of org.apache.storm.topology.FailedException in project storm by apache.
The class TridentBoltExecutor, method execute().
@Override
public void execute(Tuple tuple) {
    if (TupleUtils.isTick(tuple)) {
        long now = System.currentTimeMillis();
        if (now - lastRotate > messageTimeoutMs) {
            batches.rotate();
            lastRotate = now;
        }
        return;
    }
    String batchGroup = batchGroupIds.get(tuple.getSourceGlobalStreamId());
    if (batchGroup == null) {
        // this is so we can do things like have simple DRPC that doesn't need to use batch processing
        coordCollector.setCurrBatch(null);
        bolt.execute(null, tuple);
        collector.ack(tuple);
        return;
    }
    IBatchID id = (IBatchID) tuple.getValue(0);
    // get the transaction id; if the batch is already tracked and this attempt id is
    // greater than the tracked attempt, drop the old state and track the new attempt
    TrackedBatch tracked = (TrackedBatch) batches.get(id.getId());
    // only one attempt is ever tracked per batch, so when failures happen
    // you don't get an explosion in memory usage in the tasks
    if (tracked != null) {
        if (id.getAttemptId() > tracked.attemptId) {
            batches.remove(id.getId());
            tracked = null;
        } else if (id.getAttemptId() < tracked.attemptId) {
            // no reason to execute an attempt older than the one we've already seen
            return;
        }
    }
    if (tracked == null) {
        tracked = new TrackedBatch(new BatchInfo(batchGroup, id, bolt.initBatchState(batchGroup, id)),
                coordConditions.get(batchGroup), id.getAttemptId());
        batches.put(id.getId(), tracked);
    }
    coordCollector.setCurrBatch(tracked);
    // System.out.println("TRACKED: " + tracked + " " + tuple);
    TupleType t = getTupleType(tuple, tracked);
    if (t == TupleType.COMMIT) {
        tracked.receivedCommit = true;
        checkFinish(tracked, tuple, t);
    } else if (t == TupleType.COORD) {
        int count = tuple.getInteger(1);
        tracked.reportedTasks++;
        tracked.expectedTupleCount += count;
        checkFinish(tracked, tuple, t);
    } else {
        tracked.receivedTuples++;
        boolean success = true;
        try {
            bolt.execute(tracked.info, tuple);
            if (tracked.condition.expectedTaskReports == 0) {
                success = finishBatch(tracked, tuple);
            }
        } catch (FailedException e) {
            failBatch(tracked, e);
        }
        if (success) {
            collector.ack(tuple);
        } else {
            collector.fail(tuple);
        }
    }
    coordCollector.setCurrBatch(null);
}
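In the executor above, a FailedException thrown inside bolt.execute(...) is turned into failBatch(...), which fails the whole batch so Trident can replay it. A minimal sketch of how user code typically triggers that path; MyValidatingFunction is a hypothetical operation, not part of Storm:

import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.operation.BaseFunction;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;

// Hypothetical example function: rejecting one tuple fails the whole batch.
public class MyValidatingFunction extends BaseFunction {
    @Override
    public void execute(TridentTuple tuple, TridentCollector collector) {
        Object value = tuple.getValue(0);
        if (value == null) {
            // Caught by TridentBoltExecutor.execute above; the batch is failed and replayed.
            throw new FailedException("null value in batch, forcing replay");
        }
        collector.emit(tuple.getValues());
    }
}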
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class WindowsStateUpdater, method updateState().
@Override
public void updateState(WindowsState state, List<TridentTuple> tuples, TridentCollector collector) {
    Long currentTxId = state.getCurrentTxId();
    LOG.debug("Removing triggers using WindowStateUpdater, txnId: [{}] ", currentTxId);
    for (TridentTuple tuple : tuples) {
        try {
            Object fieldValue = tuple.getValueByField(WindowTridentProcessor.TRIGGER_FIELD_NAME);
            if (!(fieldValue instanceof WindowTridentProcessor.TriggerInfo)) {
                throw new IllegalClassException(WindowTridentProcessor.TriggerInfo.class, fieldValue.getClass());
            }
            WindowTridentProcessor.TriggerInfo triggerInfo = (WindowTridentProcessor.TriggerInfo) fieldValue;
            String triggerCompletedKey = WindowTridentProcessor.getWindowTriggerInprocessIdPrefix(triggerInfo.windowTaskId) + currentTxId;
            LOG.debug("Removing trigger key [{}] and trigger completed key [{}] from store: [{}]", triggerInfo, triggerCompletedKey, windowsStore);
            windowsStore.removeAll(Lists.newArrayList(triggerInfo.generateTriggerKey(), triggerCompletedKey));
        } catch (Exception ex) {
            LOG.warn(ex.getMessage());
            collector.reportError(ex);
            throw new FailedException(ex);
        }
    }
}
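The wrap-and-rethrow pattern above works in any custom state updater: report the error so it is surfaced in the Storm UI, then throw FailedException so Trident replays the batch instead of dropping it. A hedged sketch, assuming a hypothetical MyState with a persist(...) method:

import java.util.List;
import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.state.BaseStateUpdater;
import org.apache.storm.trident.tuple.TridentTuple;

// MyState is a hypothetical State implementation with a persist(...) method.
public class LoggingStateUpdater extends BaseStateUpdater<MyState> {
    @Override
    public void updateState(MyState state, List<TridentTuple> tuples, TridentCollector collector) {
        try {
            state.persist(tuples);
        } catch (Exception ex) {
            collector.reportError(ex); // surface the error in the UI/logs
            throw new FailedException(ex); // fail the batch so it is replayed
        }
    }
}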
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class TridentAyncCQLResultSetValuesMapper, method map().
@Override
public List<List<Values>> map(Session session, List<Statement> statements, final List<ITuple> tuples) {
    AsyncExecutor<Integer> executor = AsyncExecutorProvider.getLocal(session, AsyncResultHandler.NO_OP_HANDLER);
    final List<Integer> indexes = new ArrayList<>();
    final List<List<Values>> results = new ArrayList<>();
    for (int i = 0; i < statements.size(); i++) {
        indexes.add(i);
        results.add(null);
    }
    SettableFuture<List<Integer>> result = executor.execAsync(statements, indexes, throttle, new AsyncResultSetHandler<Integer>() {
        @Override
        public void success(Integer index, ResultSet resultSet) {
            if (outputDeclaredFields != null) {
                List<Values> thisResult = new ArrayList<>();
                for (Row row : resultSet) {
                    final Values values = new Values();
                    for (String field : outputDeclaredFields) {
                        ITuple tuple = tuples.get(index);
                        if (tuple.contains(field)) {
                            values.add(tuple.getValueByField(field));
                        } else {
                            values.add(row.getObject(field));
                        }
                    }
                    thisResult.add(values);
                }
                results.set(index, thisResult);
            }
        }

        @Override
        public void failure(Throwable t, Integer index) {
            // Exceptions are captured and thrown at the end of the batch by the executor
        }
    });
    try {
        // Await all results
        result.get();
    } catch (Exception e) {
        throw new FailedException(e.getMessage(), e);
    }
    return results;
}
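The await-and-convert step at the end of map(...) is a general pattern, not Cassandra-specific. A standalone sketch of the same idea (awaitOrFailBatch is an illustrative name, not a Storm API): block on the future and translate any failure, including interruption, into a FailedException so the batch is replayed rather than lost:

import java.util.concurrent.Future;
import org.apache.storm.topology.FailedException;

public final class FutureAwait {
    private FutureAwait() {
    }

    // Blocks until the async batch completes; any failure fails the Trident batch.
    public static <T> T awaitOrFailBatch(Future<T> future) {
        try {
            return future.get();
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            throw new FailedException("interrupted while awaiting async results", e);
        } catch (Exception e) {
            throw new FailedException(e.getMessage(), e);
        }
    }
}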
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class HBaseMapState, method multiGet().
@Override
public List<T> multiGet(List<List<Object>> keys) {
    List<Get> gets = new ArrayList<Get>();
    for (List<Object> key : keys) {
        byte[] hbaseKey = this.options.mapMapper.rowKey(key);
        String qualifier = this.options.mapMapper.qualifier(key);
        LOG.info("Partition: {}, GET: {}", this.partitionNum, new String(hbaseKey));
        Get get = new Get(hbaseKey);
        get.addColumn(this.options.columnFamily.getBytes(), qualifier.getBytes());
        gets.add(get);
    }
    List<T> retval = new ArrayList<T>();
    try {
        Result[] results = this.table.get(gets);
        for (int i = 0; i < keys.size(); i++) {
            String qualifier = this.options.mapMapper.qualifier(keys.get(i));
            Result result = results[i];
            byte[] value = result.getValue(this.options.columnFamily.getBytes(), qualifier.getBytes());
            if (value != null) {
                retval.add(this.serializer.deserialize(value));
            } else {
                retval.add(null);
            }
        }
    } catch (IOException e) {
        throw new FailedException("IOException while reading from HBase.", e);
    }
    return retval;
}
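Only the I/O failure is translated here: the gets are idempotent reads, so failing the batch with FailedException and letting Trident replay it is safe. The error-translation step in isolation, as a hedged sketch against the HBase Table API (HBaseReads and getOrFailBatch are illustrative names):

import java.io.IOException;
import java.util.List;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.storm.topology.FailedException;

final class HBaseReads {
    // Idempotent multi-get: replaying the batch simply re-issues the same reads.
    static Result[] getOrFailBatch(Table table, List<Get> gets) {
        try {
            return table.get(gets);
        } catch (IOException e) {
            throw new FailedException("IOException while reading from HBase.", e);
        }
    }
}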
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class HBaseState, method batchRetrieve().
public List<List<Values>> batchRetrieve(List<TridentTuple> tridentTuples) {
    List<List<Values>> batchRetrieveResult = Lists.newArrayList();
    List<Get> gets = Lists.newArrayList();
    for (TridentTuple tuple : tridentTuples) {
        byte[] rowKey = options.mapper.rowKey(tuple);
        gets.add(hBaseClient.constructGetRequests(rowKey, options.projectionCriteria));
    }
    try {
        Result[] results = hBaseClient.batchGet(gets);
        for (int i = 0; i < results.length; i++) {
            Result result = results[i];
            TridentTuple tuple = tridentTuples.get(i);
            List<Values> values = options.rowToStormValueMapper.toValues(tuple, result);
            batchRetrieveResult.add(values);
        }
    } catch (Exception e) {
        LOG.warn("Batch get operation failed. Triggering replay.", e);
        throw new FailedException(e);
    }
    return batchRetrieveResult;
}
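For context, a state whose batchRetrieve(...) throws FailedException is usually queried via stateQuery; when the exception propagates, Trident fails the batch and replays it from the spout. A hedged wiring sketch (spout and stateFactory are placeholders supplied by the caller; HBaseQuery is the query function shipped with storm-hbase, assumed here to have a no-arg constructor):

import org.apache.storm.hbase.trident.state.HBaseQuery;
import org.apache.storm.trident.TridentState;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.trident.spout.ITridentSpout;
import org.apache.storm.trident.state.StateFactory;
import org.apache.storm.tuple.Fields;

public class HBaseQueryWiring {
    // spout and stateFactory are placeholders; stateFactory would typically be an HBaseStateFactory.
    public static TridentTopology build(ITridentSpout<?> spout, StateFactory stateFactory) {
        TridentTopology topology = new TridentTopology();
        TridentState hbaseState = topology.newStaticState(stateFactory);
        topology.newStream("events", spout)
                .stateQuery(hbaseState, new Fields("rowKey"), new HBaseQuery(), new Fields("columns"));
        return topology;
    }
}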