Use of org.apache.storm.topology.FailedException in project storm by apache.
Class RocketMqState, method updateState.
/**
* Update the RocketMQ state.
* @param tuples trident tuples
* @param collector trident collector
*/
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<Message> messages = new LinkedList<>();
    for (TridentTuple tuple : tuples) {
        String topic = options.selector.getTopic(tuple);
        String tag = options.selector.getTag(tuple);
        String key = options.mapper.getKeyFromTuple(tuple);
        byte[] value = options.mapper.getValueFromTuple(tuple);
        if (topic == null) {
            LOG.warn("skipping Message with Key = " + key + ", topic selector returned null.");
            continue;
        }
        Message msg = new Message(topic, tag, key, value);
        messages.add(msg);
    }
    try {
        this.producer.send(messages);
    } catch (Exception e) {
        LOG.warn("Batch write failed. Triggering replay.", e);
        collector.reportError(e);
        throw new FailedException(e);
    }
}
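For context, a Trident state like this is normally wired into a topology with partitionPersist; because updateState rethrows the send failure as a FailedException, the whole batch is failed and replayed by the master batch coordinator instead of being silently dropped. Below is a minimal wiring sketch under that assumption; RocketMqStateFactory, RocketMqStateUpdater, and the spout field names are illustrative, not verbatim from the project.

// Illustrative wiring sketch; factory/updater names and fields are assumptions.
TridentTopology topology = new TridentTopology();
Stream stream = topology.newStream("messageSpout", spout);   // spout assumed to emit "topic", "key", "value"

// Each batch is handed to the state through a StateUpdater; a FailedException
// thrown from RocketMqState.updateState fails the batch and triggers a replay.
stream.partitionPersist(
        new RocketMqStateFactory(options),            // assumed factory producing RocketMqState
        new Fields("topic", "key", "value"),
        new RocketMqStateUpdater());                  // assumed updater delegating to updateState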
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class TridentSpoutExecutor, method execute.
@Override
public void execute(BatchInfo info, Tuple input) {
    // there won't be a BatchInfo for the success stream
    TransactionAttempt attempt = (TransactionAttempt) input.getValue(0);
    if (input.getSourceStreamId().equals(MasterBatchCoordinator.COMMIT_STREAM_ID)) {
        if (attempt.equals(activeBatches.get(attempt.getTransactionId()))) {
            ((ICommitterTridentSpout.Emitter) emitter).commit(attempt);
            activeBatches.remove(attempt.getTransactionId());
        } else {
            throw new FailedException("Received commit for different transaction attempt");
        }
    } else if (input.getSourceStreamId().equals(MasterBatchCoordinator.SUCCESS_STREAM_ID)) {
        // valid to delete before what's been committed since
        // those batches will never be accessed again
        activeBatches.headMap(attempt.getTransactionId()).clear();
        emitter.success(attempt);
    } else {
        collector.setBatch(info.batchId);
        emitter.emitBatch(attempt, input.getValue(1), collector);
        activeBatches.put(attempt.getTransactionId(), attempt);
    }
}
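The FailedException thrown above is not meant to crash the worker: Trident's executor layer treats it as a signal that the current batch cannot be completed. A simplified, hypothetical sketch of that handling (not the verbatim TridentBoltExecutor code) looks like this:

// Simplified sketch of how the surrounding executor consumes a FailedException.
try {
    bolt.execute(batchInfo, tuple);
} catch (FailedException e) {
    if (e instanceof ReportedFailedException) {
        collector.reportError(e);   // only ReportedFailedException is surfaced as an error
    }
    collector.fail(tuple);          // fail the batch so the coordinator replays it
}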
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class HBaseMapState, method multiPut.
@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
    List<Put> puts = new ArrayList<Put>(keys.size());
    for (int i = 0; i < keys.size(); i++) {
        byte[] hbaseKey = this.options.mapMapper.rowKey(keys.get(i));
        String qualifier = this.options.mapMapper.qualifier(keys.get(i));
        LOG.info("Partition: {}, Key: {}, Value: {}",
                new Object[] { this.partitionNum, new String(hbaseKey), new String(this.serializer.serialize(values.get(i))) });
        Put put = new Put(hbaseKey);
        T val = values.get(i);
        put.addColumn(this.options.columnFamily.getBytes(), qualifier.getBytes(), this.serializer.serialize(val));
        puts.add(put);
    }
    try {
        this.table.put(puts);
    } catch (InterruptedIOException e) {
        throw new FailedException("Interrupted while writing to HBase", e);
    } catch (RetriesExhaustedWithDetailsException e) {
        throw new FailedException("Retries exhausted while writing to HBase", e);
    } catch (IOException e) {
        throw new FailedException("IOException while writing to HBase", e);
    }
}
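multiPut is invoked by Trident's map-state machinery when a grouped aggregation commits a batch, so a FailedException here again translates into a batch replay. A sketch of how a state like this is typically plugged in is shown below; the opaque factory method, the Options field names, and the mapper variable are assumptions about the storm-hbase module, not verbatim code.

// Hypothetical wiring of an opaque HBase map state behind persistentAggregate.
HBaseMapState.Options<OpaqueValue> options = new HBaseMapState.Options<>();
options.tableName = "WordCount";          // field names assumed from the snippet above
options.columnFamily = "cf";
options.mapMapper = wordMapper;           // assumed mapper providing rowKey()/qualifier() as used above

TridentState counts = stream
        .groupBy(new Fields("word"))
        .persistentAggregate(HBaseMapState.opaque(options), new Count(), new Fields("count"));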
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class HBaseState, method updateState.
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<Mutation> mutations = Lists.newArrayList();
    for (TridentTuple tuple : tuples) {
        byte[] rowKey = options.mapper.rowKey(tuple);
        ColumnList cols = options.mapper.columns(tuple);
        mutations.addAll(hBaseClient.constructMutationReq(rowKey, cols, options.durability));
    }
    try {
        hBaseClient.batchMutate(mutations);
    } catch (Exception e) {
        collector.reportError(e);
        throw new FailedException(e);
    }
}
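The glue between this method and Trident is a StateUpdater: the topology's partitionPersist call invokes the updater once per batch, and the updater simply delegates to updateState, so the FailedException propagates out and fails the batch. A minimal sketch of such an updater follows; it is assumed to mirror the updater class that storm-hbase provides, not copied from it.

// Minimal updater sketch; the class name here is an assumption.
class HBaseStateUpdaterSketch extends BaseStateUpdater<HBaseState> {
    @Override
    public void updateState(HBaseState state, List<TridentTuple> tuples, TridentCollector collector) {
        // Delegate the whole batch; any FailedException thrown below fails this batch.
        state.updateState(tuples, collector);
    }
}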
Use of org.apache.storm.topology.FailedException in project storm by apache.
Class HdfsState, method updateIndex.
private void updateIndex(long txId) {
    LOG.debug("Starting index update.");
    final Path tmpPath = tmpFilePath(indexFilePath.toString());
    try (FSDataOutputStream out = this.options.fs.create(tmpPath, true);
         BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out))) {
        TxnRecord txnRecord = new TxnRecord(txId, options.currentFile.toString(), this.options.getCurrentOffset());
        bw.write(txnRecord.toString());
        bw.newLine();
        bw.flush();
        /* In non-error scenarios, for the Azure Data Lake Store File System (adl://), the output
           stream must be closed before the file associated with it is deleted: ADLFS removes any
           open handles to a deleted file, so a close() issued after the delete would fail. */
        out.close();
        /*
         * Delete the current index file and rename the tmp file to atomically
         * replace the index file. Orphan .tmp files are handled in getTxnRecord.
         */
        options.fs.delete(this.indexFilePath, false);
        options.fs.rename(tmpPath, this.indexFilePath);
        lastSeenTxn = txnRecord;
        LOG.debug("updateIndex updated lastSeenTxn to [{}]", this.lastSeenTxn);
    } catch (IOException e) {
        LOG.warn("Begin commit failed due to IOException. Failing batch", e);
        throw new FailedException(e);
    }
}
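updateIndex runs on the state's begin-commit path, so an IOException here aborts the transaction before any tuples are written for it, and Trident retries the same transaction id. A simplified illustration of that calling path is sketched below; the txnid field and the recovery branch are assumptions and are elided rather than reproduced.

// Simplified, illustrative caller; not the verbatim HdfsState code.
public void beginCommit(Long txId) {
    if (txId <= lastSeenTxn.txnid) {
        // Replayed transaction: recover/truncate partial writes from the earlier attempt (details elided).
    }
    updateIndex(txId);   // throws FailedException on IOException, failing the batch up front
}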