Example 11 with FailedException

use of org.apache.storm.topology.FailedException in project storm by apache.

the class RocketMqState method updateState.

/**
 * Update the RocketMQ state.
 * @param tuples trident tuples
 * @param collector trident collector
 */
public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<Message> messages = new LinkedList<>();
    for (TridentTuple tuple : tuples) {
        String topic = options.selector.getTopic(tuple);
        String tag = options.selector.getTag(tuple);
        String key = options.mapper.getKeyFromTuple(tuple);
        byte[] value = options.mapper.getValueFromTuple(tuple);
        if (topic == null) {
            LOG.warn("skipping Message with Key = " + key + ", topic selector returned null.");
            continue;
        }
        Message msg = new Message(topic, tag, key, value);
        messages.add(msg);
    }
    try {
        this.producer.send(messages);
    } catch (Exception e) {
        LOG.warn("Batch write failed. Triggering replay.", e);
        collector.reportError(e);
        throw new FailedException(e);
    }
}
Also used : Message(org.apache.rocketmq.common.message.Message) FailedException(org.apache.storm.topology.FailedException) LinkedList(java.util.LinkedList) MQClientException(org.apache.rocketmq.client.exception.MQClientException) TridentTuple(org.apache.storm.trident.tuple.TridentTuple)
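
A minimal sketch of how a Trident state like this is typically wired into a topology: partitionPersist() hands each batch to updateState(), so the FailedException thrown above makes Trident replay that batch. The factory and updater names below (RocketMqStateFactory, RocketMqStateUpdater) follow the storm-rocketmq module's conventions but are assumptions here, as are the spout and field names.

import org.apache.storm.trident.Stream;
import org.apache.storm.trident.TridentTopology;
import org.apache.storm.trident.spout.ITridentSpout;
import org.apache.storm.tuple.Fields;

public class RocketMqPersistSketch {
    // Hypothetical wiring; the spout, options, factory, and updater are assumed.
    public static void wire(TridentTopology topology, ITridentSpout<?> spout, RocketMqState.Options options) {
        Stream stream = topology.newStream("events", spout);
        stream.partitionPersist(
            new RocketMqStateFactory(options),   // assumed StateFactory producing RocketMqState
            new Fields("topic", "key", "value"), // fields the selector/mapper read (assumed)
            new RocketMqStateUpdater());         // assumed StateUpdater delegating to updateState()
    }
}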

Example 12 with FailedException

use of org.apache.storm.topology.FailedException in project storm by apache.

the class TridentSpoutExecutor method execute.

@Override
public void execute(BatchInfo info, Tuple input) {
    // there won't be a BatchInfo for the success stream
    TransactionAttempt attempt = (TransactionAttempt) input.getValue(0);
    if (input.getSourceStreamId().equals(MasterBatchCoordinator.COMMIT_STREAM_ID)) {
        if (attempt.equals(activeBatches.get(attempt.getTransactionId()))) {
            ((ICommitterTridentSpout.Emitter) emitter).commit(attempt);
            activeBatches.remove(attempt.getTransactionId());
        } else {
            throw new FailedException("Received commit for different transaction attempt");
        }
    } else if (input.getSourceStreamId().equals(MasterBatchCoordinator.SUCCESS_STREAM_ID)) {
        // valid to delete before what's been committed since
        // those batches will never be accessed again
        activeBatches.headMap(attempt.getTransactionId()).clear();
        emitter.success(attempt);
    } else {
        collector.setBatch(info.batchId);
        emitter.emitBatch(attempt, input.getValue(1), collector);
        activeBatches.put(attempt.getTransactionId(), attempt);
    }
}
Also used : FailedException(org.apache.storm.topology.FailedException) TransactionAttempt(org.apache.storm.trident.topology.TransactionAttempt)
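
One detail worth unpacking: activeBatches is a TreeMap keyed by transaction id, and headMap(txid) returns a live view of all entries with strictly smaller keys, so clearing it drops every batch older than the one that just succeeded. A self-contained sketch of just that idiom:

import java.util.TreeMap;

public class HeadMapDemo {
    public static void main(String[] args) {
        TreeMap<Long, String> activeBatches = new TreeMap<>();
        activeBatches.put(1L, "attempt-1");
        activeBatches.put(2L, "attempt-2");
        activeBatches.put(3L, "attempt-3");
        // headMap(3L) is a live view of keys < 3; clearing it mutates the map itself.
        activeBatches.headMap(3L).clear();
        System.out.println(activeBatches); // prints {3=attempt-3}
    }
}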

Example 13 with FailedException

use of org.apache.storm.topology.FailedException in project storm by apache.

the class HBaseMapState method multiPut.

@Override
public void multiPut(List<List<Object>> keys, List<T> values) {
    List<Put> puts = new ArrayList<Put>(keys.size());
    for (int i = 0; i < keys.size(); i++) {
        byte[] hbaseKey = this.options.mapMapper.rowKey(keys.get(i));
        String qualifier = this.options.mapMapper.qualifier(keys.get(i));
        LOG.info("Partiton: {}, Key: {}, Value: {}", new Object[] { this.partitionNum, new String(hbaseKey), new String(this.serializer.serialize(values.get(i))) });
        Put put = new Put(hbaseKey);
        T val = values.get(i);
        put.addColumn(this.options.columnFamily.getBytes(), qualifier.getBytes(), this.serializer.serialize(val));
        puts.add(put);
    }
    try {
        this.table.put(puts);
    } catch (InterruptedIOException e) {
        throw new FailedException("Interrupted while writing to HBase", e);
    } catch (RetriesExhaustedWithDetailsException e) {
        throw new FailedException("Retries exhaused while writing to HBase", e);
    } catch (IOException e) {
        throw new FailedException("IOException while writing to HBase", e);
    }
}
Also used : InterruptedIOException(java.io.InterruptedIOException) RetriesExhaustedWithDetailsException(org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException) FailedException(org.apache.storm.topology.FailedException) ArrayList(java.util.ArrayList) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) Put(org.apache.hadoop.hbase.client.Put)
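
For orientation, a hedged sketch of how a map state like this is usually driven: persistentAggregate() aggregates grouped values per batch and hands them to multiPut(), so the FailedException above fails and replays the whole batch. The Options fields and the opaque() factory method follow storm-hbase's conventions but are not verified here; the input stream is assumed.

import org.apache.storm.hbase.trident.state.HBaseMapState;
import org.apache.storm.trident.Stream;
import org.apache.storm.trident.operation.builtin.Count;
import org.apache.storm.tuple.Fields;

public class HBaseMapStateSketch {
    @SuppressWarnings({"rawtypes", "unchecked"})
    public static void wire(Stream words) {
        HBaseMapState.Options options = new HBaseMapState.Options(); // field names assumed
        options.tableName = "wordcount";
        options.columnFamily = "cf";
        words.groupBy(new Fields("word"))
             // multiPut() above receives the aggregated counts for each batch
             .persistentAggregate(HBaseMapState.opaque(options), new Count(), new Fields("count"));
    }
}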

Example 14 with FailedException

use of org.apache.storm.topology.FailedException in project storm by apache.

the class HBaseState method updateState.

public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
    List<Mutation> mutations = Lists.newArrayList();
    for (TridentTuple tuple : tuples) {
        byte[] rowKey = options.mapper.rowKey(tuple);
        ColumnList cols = options.mapper.columns(tuple);
        mutations.addAll(hBaseClient.constructMutationReq(rowKey, cols, options.durability));
    }
    try {
        hBaseClient.batchMutate(mutations);
    } catch (Exception e) {
        collector.reportError(e);
        throw new FailedException(e);
    }
}
Also used : FailedException(org.apache.storm.topology.FailedException) Mutation(org.apache.hadoop.hbase.client.Mutation) ColumnList(org.apache.storm.hbase.common.ColumnList) TridentTuple(org.apache.storm.trident.tuple.TridentTuple)
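
The two-step error handling here is a common Trident pattern: reportError() surfaces the exception to the Storm UI and error metrics, while FailedException tells the coordinator to fail and replay the batch rather than crash the worker. A minimal generic sketch, with writeBatch() as a hypothetical external write:

import java.util.List;
import org.apache.storm.topology.FailedException;
import org.apache.storm.trident.operation.TridentCollector;
import org.apache.storm.trident.tuple.TridentTuple;

public abstract class ReplayOnFailureState {
    public void updateState(List<TridentTuple> tuples, TridentCollector collector) {
        try {
            writeBatch(tuples);           // hypothetical external write
        } catch (Exception e) {
            collector.reportError(e);     // make the failure visible in the UI
            throw new FailedException(e); // fail the batch so Trident replays it
        }
    }

    protected abstract void writeBatch(List<TridentTuple> tuples) throws Exception;
}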

Example 15 with FailedException

use of org.apache.storm.topology.FailedException in project storm by apache.

the class HdfsState method updateIndex.

private void updateIndex(long txId) {
    LOG.debug("Starting index update.");
    final Path tmpPath = tmpFilePath(indexFilePath.toString());
    try (FSDataOutputStream out = this.options.fs.create(tmpPath, true);
        BufferedWriter bw = new BufferedWriter(new OutputStreamWriter(out))) {
        TxnRecord txnRecord = new TxnRecord(txId, options.currentFile.toString(), this.options.getCurrentOffset());
        bw.write(txnRecord.toString());
        bw.newLine();
        bw.flush();
        /* In non-error scenarios, for the Azure Data Lake Store file system (adl://),
         * the output stream must be closed before the file associated with it is deleted:
         * on ADLFS, deleting the file also removes any handles to it, so a close() after
         * the delete would fail. */
        out.close();
        /*
         * Delete the current index file and rename the tmp file to atomically
         * replace the index file. Orphan .tmp files are handled in getTxnRecord.
         */
        options.fs.delete(this.indexFilePath, false);
        options.fs.rename(tmpPath, this.indexFilePath);
        lastSeenTxn = txnRecord;
        LOG.debug("updateIndex updated lastSeenTxn to [{}]", this.lastSeenTxn);
    } catch (IOException e) {
        LOG.warn("Begin commit failed due to IOException. Failing batch", e);
        throw new FailedException(e);
    }
}
Also used : Path(org.apache.hadoop.fs.Path) FailedException(org.apache.storm.topology.FailedException) OutputStreamWriter(java.io.OutputStreamWriter) FSDataOutputStream(org.apache.hadoop.fs.FSDataOutputStream) IOException(java.io.IOException) BufferedWriter(java.io.BufferedWriter)
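
The write-to-tmp-then-swap dance above exists because HDFS's FileSystem API offers no atomic replace, hence the explicit delete() followed by rename(). On a local filesystem the same idea collapses into a single atomic move; a self-contained sketch for contrast (file names are illustrative):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardCopyOption;

public class AtomicIndexReplace {
    // Write the new record to a sibling .tmp file, then atomically move it over
    // the index file; on POSIX filesystems, rename replaces the target in one step.
    static void replace(Path index, String record) throws IOException {
        Path tmp = index.resolveSibling(index.getFileName() + ".tmp");
        Files.write(tmp, record.getBytes());
        Files.move(tmp, index, StandardCopyOption.ATOMIC_MOVE);
    }

    public static void main(String[] args) throws IOException {
        replace(Paths.get("index.txt"), "txId=42 file=part-0001 offset=1024");
    }
}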

Aggregations

FailedException (org.apache.storm.topology.FailedException): 27 usages
TridentTuple (org.apache.storm.trident.tuple.TridentTuple): 11 usages
ArrayList (java.util.ArrayList): 10 usages
Values (org.apache.storm.tuple.Values): 8 usages
List (java.util.List): 5 usages
IOException (java.io.IOException): 4 usages
Document (org.bson.Document): 4 usages
Statement (com.datastax.driver.core.Statement): 3 usages
Bson (org.bson.conversions.Bson): 3 usages
BatchStatement (com.datastax.driver.core.BatchStatement): 2 usages
InterruptedIOException (java.io.InterruptedIOException): 2 usages
BigInteger (java.math.BigInteger): 2 usages
Get (org.apache.hadoop.hbase.client.Get): 2 usages
Result (org.apache.hadoop.hbase.client.Result): 2 usages
ColumnList (org.apache.storm.hbase.common.ColumnList): 2 usages
ReportedFailedException (org.apache.storm.topology.ReportedFailedException): 2 usages
ResultSet (com.datastax.driver.core.ResultSet): 1 usage
Row (com.datastax.driver.core.Row): 1 usage
BufferedWriter (java.io.BufferedWriter): 1 usage
OutputStreamWriter (java.io.OutputStreamWriter): 1 usage