Use of org.apache.storm.topology.FailedException in project storm by apache.
The class CoordinatedBolt, method checkFinishId:
private boolean checkFinishId(Tuple tup, TupleType type) {
    Object id = tup.getValue(0);
    boolean failed = false;
    synchronized (tracked) {
        TrackingInfo track = tracked.get(id);
        try {
            if (track != null) {
                boolean delayed = false;
                if (idStreamSpec == null && type == TupleType.COORD || idStreamSpec != null && type == TupleType.ID) {
                    track.ackTuples.add(tup);
                    delayed = true;
                }
                if (track.failed) {
                    failed = true;
                    for (Tuple t : track.ackTuples) {
                        collector.fail(t);
                    }
                    tracked.remove(id);
                } else if (track.receivedId && (sourceArgs.isEmpty() || track.reportCount == numSourceReports && track.expectedTupleCount == track.receivedTuples)) {
                    if (delegate instanceof FinishedCallback) {
                        ((FinishedCallback) delegate).finishedId(id);
                    }
                    if (!(sourceArgs.isEmpty() || type != TupleType.REGULAR)) {
                        throw new IllegalStateException("Coordination condition met on a non-coordinating tuple. Should be impossible");
                    }
                    Iterator<Integer> outTasks = countOutTasks.iterator();
                    while (outTasks.hasNext()) {
                        int task = outTasks.next();
                        int numTuples = Utils.get(track.taskEmittedTuples, task, 0);
                        collector.emitDirect(task, Constants.COORDINATED_STREAM_ID, tup, new Values(id, numTuples));
                    }
                    for (Tuple t : track.ackTuples) {
                        collector.ack(t);
                    }
                    track.finished = true;
                    tracked.remove(id);
                }
                if (!delayed && type != TupleType.REGULAR) {
                    if (track.failed) {
                        collector.fail(tup);
                    } else {
                        collector.ack(tup);
                    }
                }
            } else {
                if (type != TupleType.REGULAR) {
                    collector.fail(tup);
                }
            }
        } catch (FailedException e) {
            LOG.error("Failed to finish batch", e);
            for (Tuple t : track.ackTuples) {
                collector.fail(t);
            }
            tracked.remove(id);
            failed = true;
        }
    }
    return failed;
}
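The catch block above is what makes FailedException useful in this class: a delegate that implements FinishedCallback can abort batch finalization without crashing the worker, and CoordinatedBolt turns the exception into collector.fail() on every tuple tracked for the batch. A minimal sketch of such a delegate (not from the Storm codebase; the flushBatch helper is hypothetical):

import org.apache.storm.coordination.CoordinatedBolt.FinishedCallback;
import org.apache.storm.topology.FailedException;

// Sketch: a delegate whose finishedId hook throws FailedException so the
// CoordinatedBolt catch block above fails the whole batch for replay.
public abstract class BatchWritingBolt implements FinishedCallback {
    @Override
    public void finishedId(Object id) {
        try {
            flushBatch(id); // hypothetical: push buffered work for this batch downstream
        } catch (Exception e) {
            throw new FailedException("could not finish batch " + id, e);
        }
    }

    protected abstract void flushBatch(Object id) throws Exception;
}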
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class CassandraState, method updateState:
public void updateState(List<TridentTuple> tuples, final TridentCollector collector) {
    List<Statement> statements = new ArrayList<>();
    for (TridentTuple tuple : tuples) {
        statements.addAll(options.cqlStatementTupleMapper.map(conf, session, tuple));
    }
    try {
        if (options.batchingType != null) {
            BatchStatement batchStatement = new BatchStatement(options.batchingType);
            batchStatement.addAll(statements);
            session.execute(batchStatement);
        } else {
            for (Statement statement : statements) {
                session.execute(statement);
            }
        }
    } catch (Exception e) {
        LOG.warn("Batch write operation failed.", e);
        collector.reportError(e);
        throw new FailedException(e);
    }
}
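For context, this report-then-throw pattern is how a Trident StateUpdater typically signals a retriable failure: reportError makes the cause visible in the Storm UI, and the FailedException tells Trident to replay the batch rather than kill the worker. A minimal sketch, assuming a hypothetical MyDbState with a write method standing in for CassandraState:

import java.util.List;
import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.state.BaseStateUpdater;
import org.apache.storm.trident.state.State;
import org.apache.storm.trident.tuple.TridentTuple;

// Hypothetical state type; stands in for CassandraState in this sketch.
interface MyDbState extends State {
    void write(List<TridentTuple> tuples) throws Exception;
}

public class MyDbStateUpdater extends BaseStateUpdater<MyDbState> {
    @Override
    public void updateState(MyDbState state, List<TridentTuple> tuples, TridentCollector collector) {
        try {
            state.write(tuples); // hypothetical batch write
        } catch (Exception e) {
            collector.reportError(e);     // surface the failure in the Storm UI
            throw new FailedException(e); // ask Trident to replay the batch
        }
    }
}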
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class CassandraState, method batchRetrieve:
public List<List<Values>> batchRetrieve(List<TridentTuple> tridentTuples) {
    Preconditions.checkNotNull(options.cqlResultSetValuesMapper, "CassandraState.Options should have cqlResultSetValuesMapper");
    List<List<Values>> batchRetrieveResult = new ArrayList<>();
    try {
        for (TridentTuple tridentTuple : tridentTuples) {
            List<Statement> statements = options.cqlStatementTupleMapper.map(conf, session, tridentTuple);
            for (Statement statement : statements) {
                List<List<Values>> values = options.cqlResultSetValuesMapper.map(session, statement, tridentTuple);
                batchRetrieveResult.addAll(values);
            }
        }
    } catch (Exception e) {
        LOG.warn("Batch retrieve operation failed.", e);
        throw new FailedException(e);
    }
    return batchRetrieveResult;
}
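batchRetrieve is designed to be called from a Trident query function, where a FailedException means the read batch is failed and replayed instead of emitting partial results. A minimal sketch of such a caller (the class name is illustrative, not the one storm-cassandra ships; the import for CassandraState, shown above, is omitted):

import java.util.List;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.state.BaseQueryFunction;
import org.apache.storm.trident.tuple.TridentTuple;
import org.apache.storm.tuple.Values;

// Sketch: delegates to CassandraState.batchRetrieve above; any
// FailedException thrown there propagates and fails the whole read batch.
public class CassandraLookupQuery extends BaseQueryFunction<CassandraState, List<Values>> {
    @Override
    public List<List<Values>> batchRetrieve(CassandraState state, List<TridentTuple> tuples) {
        return state.batchRetrieve(tuples);
    }

    @Override
    public void execute(TridentTuple tuple, List<Values> result, TridentCollector collector) {
        for (Values values : result) {
            collector.emit(values); // append looked-up values to the stream
        }
    }
}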
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class CassandraBackingMap, method multiPut:
@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
    LOG.debug("multiPut writing {} values.", keys.size());
    List<Statement> statements = new ArrayList<>();
    for (int i = 0; i < keys.size(); i++) {
        Values stateValues = options.stateMapper.toValues(values.get(i));
        SimpleTuple tuple = new SimpleTuple(allFields, keys.get(i), stateValues);
        statements.addAll(options.putMapper.map(conf, session, tuple));
    }
    try {
        putResultMapper.map(session, statements, null);
    } catch (Exception e) {
        LOG.warn("Write operation failed: {}", e.getMessage());
        throw new FailedException(e);
    }
}
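The same wrap-and-throw pattern generalizes to any custom Trident backing map: IBackingMap has only multiGet and multiPut, and wrapping storage failures in FailedException makes Trident replay the batch instead of silently losing state. A minimal sketch, assuming a hypothetical MyKeyValueStore client:

import java.util.List;
import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.state.map.IBackingMap;

// Hypothetical storage client; stands in for the Cassandra session above.
interface MyKeyValueStore<T> {
    List<T> get(List<List<Object>> keys) throws Exception;
    void put(List<List<Object>> keys, List<T> values) throws Exception;
}

public class MyBackingMap<T> implements IBackingMap<T> {
    private final MyKeyValueStore<T> store;

    public MyBackingMap(MyKeyValueStore<T> store) {
        this.store = store;
    }

    @Override
    public List<T> multiGet(List<List<Object>> keys) {
        try {
            return store.get(keys);
        } catch (Exception e) {
            throw new FailedException(e); // replay rather than lose reads
        }
    }

    @Override
    public void multiPut(List<List<Object>> keys, List<T> values) {
        try {
            store.put(keys, values);
        } catch (Exception e) {
            throw new FailedException(e); // replay rather than drop writes
        }
    }
}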
Use of org.apache.storm.topology.FailedException in project storm by apache.
The class EsState, method updateState:
/**
 * Store the current state to Elasticsearch.
 *
 * @param tuples list of tuples to store in ES. Each tuple should carry the
 *               fields (source, index, type, id) that EsState's tupleMapper
 *               needs to extract an ES document.
 */
public void updateState(List<TridentTuple> tuples) {
    try {
        String bulkRequest = buildRequest(tuples);
        Response response = client.performRequest("post", "_bulk", new HashMap<>(), new StringEntity(bulkRequest));
        BulkIndexResponse bulkResponse = objectMapper.readValue(response.getEntity().getContent(), BulkIndexResponse.class);
        if (bulkResponse.hasErrors()) {
            LOG.warn("failed processing bulk index requests: " + bulkResponse.getFirstError() + ": " + bulkResponse.getFirstResult());
            throw new FailedException();
        }
    } catch (IOException e) {
        LOG.warn("failed processing bulk index requests: " + e.toString());
        throw new FailedException(e);
    }
}
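Note that EsState throws a bare FailedException() when the bulk response reports errors, so the batch fails without the cause being attached. Where visibility matters, ReportedFailedException (a FailedException subclass) can be thrown instead; for plain bolts run through BasicBoltExecutor, it both fails the input tuple and reports the error to the UI. A minimal sketch with a hypothetical indexDocument helper (not part of the ES module):

import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.ReportedFailedException;
import org.apache.storm.topology.base.BaseBasicBolt;
import org.apache.storm.tuple.Tuple;

public class IndexingBolt extends BaseBasicBolt {
    @Override
    public void execute(Tuple input, BasicOutputCollector collector) {
        try {
            indexDocument(input); // hypothetical single-document write
        } catch (Exception e) {
            // Fails the input tuple and surfaces the cause, instead of
            // failing silently like a bare FailedException().
            throw new ReportedFailedException(e);
        }
    }

    private void indexDocument(Tuple input) throws Exception {
        // hypothetical: build and send an index request from the tuple fields
    }

    @Override
    public void declareOutputFields(OutputFieldsDeclarer declarer) {
        // this bolt emits no streams
    }
}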