Search in sources:

Example 76 with Increment

use of org.apache.hadoop.hbase.client.Increment in project storm-hbase by jrkinley.

In class TupleTableConfig, method getIncrementFromTuple.

/**
 * Creates an HBase {@link Increment} from a Storm {@link Tuple}.
 * <p>
 * The row key is read from the tuple field named by {@code tupleRowKeyField}. One counter
 * column is added per configured family/qualifier pair: if the qualifier also names a tuple
 * field, that field's string value becomes the counter's column name; otherwise the
 * qualifier itself is used.
 *
 * @param tuple The {@link Tuple} supplying the row key (and, optionally, counter names)
 * @param increment The amount to increment each counter by
 * @return {@link Increment} carrying one counter per configured qualifier
 */
public Increment getIncrementFromTuple(final Tuple tuple, final long increment) {
    byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));
    Increment inc = new Increment(rowKey);
    inc.setWriteToWAL(writeToWAL);
    // No explicit emptiness guard needed: iterating an empty map is a no-op.
    for (String cf : columnFamilies.keySet()) {
        byte[] cfBytes = Bytes.toBytes(cf);
        for (String cq : columnFamilies.get(cf)) {
            byte[] val;
            try {
                val = Bytes.toBytes(tuple.getStringByField(cq));
            } catch (IllegalArgumentException ex) {
                // cq is not a tuple field; fall back to using the qualifier itself
                // as the counter's column name.
                val = Bytes.toBytes(cq);
            }
            inc.addColumn(cfBytes, val, increment);
        }
    }
    return inc;
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment)

Example 77 with Increment

use of org.apache.hadoop.hbase.client.Increment in project storm-hbase by jrkinley.

In class TestSerialisation, method testAddIncrement.

@Test
public void testAddIncrement() {
    Increment inc = new Increment(KEY);
    // Increment.addColumn() overwrites rather than accumulates:
    // setting the CQ1 counter twice leaves it at 1.
    inc.addColumn(CF, CQ1, 1L);
    inc.addColumn(CF, CQ1, 1L);
    Assert.assertEquals(1L, (long) inc.getFamilyMap().get(CF).get(CQ1));
    // TupleTableConfig.addIncrement() accumulates instead:
    // CQ1 goes from 1 to 3, and CQ2 (previously unset) becomes 2.
    TupleTableConfig.addIncrement(inc, CF, CQ1, 2L);
    TupleTableConfig.addIncrement(inc, CF, CQ2, 2L);
    Assert.assertEquals(3L, (long) inc.getFamilyMap().get(CF).get(CQ1));
    Assert.assertEquals(2L, (long) inc.getFamilyMap().get(CF).get(CQ2));
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment) Test(org.junit.Test)

Example 78 with Increment

use of org.apache.hadoop.hbase.client.Increment in project storm-hbase by jrkinley.

In class HBaseCountersBatchBolt, method execute.

/**
 * {@inheritDoc}
 * <p>
 * Builds an {@link Increment} from the tuple and folds it into the in-memory
 * {@code counters} map, keyed by row key, so repeated updates to the same
 * row/column within a batch are accumulated rather than issued individually.
 */
@Override
public void execute(Tuple tuple) {
    Increment newInc = conf.getIncrementFromTuple(tuple, TupleTableConfig.DEFAULT_INCREMENT);
    // NOTE(review): the map is keyed by byte[]; lookups only work if `counters` uses a
    // byte[]-aware comparator (e.g. a TreeMap with Bytes.BYTES_COMPARATOR) — a plain
    // HashMap compares arrays by reference and would never find an entry. TODO confirm.
    Increment extInc = counters.get(newInc.getRow());
    if (extInc != null) {
        // An Increment already exists for this row: merge newInc's counters into it.
        for (Entry<byte[], NavigableMap<byte[], Long>> families : newInc.getFamilyMap().entrySet()) {
            for (Entry<byte[], Long> columns : families.getValue().entrySet()) {
                TupleTableConfig.addIncrement(extInc, families.getKey(), columns.getKey(), columns.getValue());
            }
        }
        // Fix: no re-put needed — extInc was retrieved from `counters` under this row key
        // and mutated in place, so the original `counters.put(newInc.getRow(), extInc)`
        // was redundant.
    } else {
        counters.put(newInc.getRow(), newInc);
    }
}
Also used : NavigableMap(java.util.NavigableMap) Increment(org.apache.hadoop.hbase.client.Increment)

Example 79 with Increment

use of org.apache.hadoop.hbase.client.Increment in project storm-hbase by jrkinley.

In class HBaseCountersBatchBolt, method finishBatch.

/**
 * {@inheritDoc}
 * <p>
 * Flushes the batched counters to HBase. For each buffered counter, the column is
 * incremented only when the stored txid for that counter differs from the current
 * transaction attempt's id, which makes replayed batches idempotent.
 */
@Override
public void finishBatch() {
    // NOTE(review): a new HTableConnector is created on every batch and never closed in
    // this method — presumably cleaned up elsewhere (e.g. bolt cleanup); verify.
    try {
        connector = new HTableConnector(conf);
    } catch (IOException ex) {
        throw new RuntimeException(ex);
    }
    if (LOG.isDebugEnabled()) {
        LOG.debug("Finishing tx: " + attempt.getTransactionId());
        LOG.debug(String.format("Updating idempotent counters for %d rows in table '%s'", counters.size(), conf.getTableName()));
    }
    for (Increment inc : counters.values()) {
        for (Entry<byte[], NavigableMap<byte[], Long>> e : inc.getFamilyMap().entrySet()) {
            for (Entry<byte[], Long> c : e.getValue().entrySet()) {
                // Get counters latest txid from table
                byte[] txidCQ = txidQualifier(c.getKey());
                BigInteger latestTxid = getLatestTxid(inc.getRow(), e.getKey(), txidCQ);
                long counter = c.getValue();
                // NOTE(review): assumes attempt.getTransactionId() is (or boxes to) a
                // BigInteger so equals() is meaningful — confirm against the Storm API.
                if (latestTxid == null || !latestTxid.equals(attempt.getTransactionId())) {
                    // txids are different so safe to increment counter
                    try {
                        counter = connector.getTable().incrementColumnValue(inc.getRow(), e.getKey(), c.getKey(), c.getValue(), conf.isWriteToWAL());
                    } catch (IOException ex) {
                        throw new RuntimeException(String.format("Unable to increment counter: %s, %s, %s", Bytes.toString(inc.getRow()), Bytes.toString(e.getKey()), Bytes.toString(c.getKey())), ex);
                    }
                    // Record this txid so a replay of the same attempt skips the increment.
                    putLatestTxid(inc.getRow(), e.getKey(), txidCQ);
                    // Emit (row, family, qualifier, new counter value) downstream.
                    collector.emit(new Values(inc.getRow(), e.getKey(), c.getKey(), counter));
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(String.format("txids for counter %s, %s, %s are different [%d, %d], incrementing", Bytes.toString(inc.getRow()), Bytes.toString(e.getKey()), Bytes.toString(c.getKey()), latestTxid, attempt.getTransactionId()));
                    }
                } else {
                    // Same txid: this batch was already applied (replay) — skip the update.
                    if (LOG.isDebugEnabled()) {
                        LOG.debug(String.format("txids for counter %s, %s, %s are the same [%d], skipping", Bytes.toString(inc.getRow()), Bytes.toString(e.getKey()), Bytes.toString(c.getKey()), latestTxid));
                    }
                }
            }
        }
    }
}
Also used : HTableConnector(backtype.storm.contrib.hbase.utils.HTableConnector) NavigableMap(java.util.NavigableMap) Increment(org.apache.hadoop.hbase.client.Increment) Values(backtype.storm.tuple.Values) BigInteger(java.math.BigInteger) IOException(java.io.IOException)

Example 80 with Increment

use of org.apache.hadoop.hbase.client.Increment in project metron by apache.

In class HBaseClient, method addMutation.

/**
 * Adds a Mutation such as a Put or Increment to the batch.  The Mutation is only queued for
 * later execution.
 * <p>
 * If the column list has neither regular columns nor counters, an empty {@link Put} for the
 * row is queued so the batch still contains a mutation for this row key.
 *
 * @param rowKey     The row key of the Mutation.
 * @param cols       The columns affected by the Mutation.
 * @param durability The durability of the mutation.
 */
public void addMutation(byte[] rowKey, ColumnList cols, Durability durability) {
    if (cols.hasColumns()) {
        Put put = createPut(rowKey, cols, durability);
        mutations.add(put);
    }
    if (cols.hasCounters()) {
        Increment inc = createIncrement(rowKey, cols, durability);
        mutations.add(inc);
    }
    if (mutations.isEmpty()) {
        Put put = new Put(rowKey);
        // Fix: honor the requested durability on the fallback Put as well; previously it
        // silently used the table default, inconsistent with createPut/createIncrement.
        put.setDurability(durability);
        mutations.add(put);
    }
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment) Put(org.apache.hadoop.hbase.client.Put)

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment)81 Test (org.junit.Test)42 Put (org.apache.hadoop.hbase.client.Put)31 Append (org.apache.hadoop.hbase.client.Append)25 Result (org.apache.hadoop.hbase.client.Result)25 Delete (org.apache.hadoop.hbase.client.Delete)21 Get (org.apache.hadoop.hbase.client.Get)19 IOException (java.io.IOException)16 TableName (org.apache.hadoop.hbase.TableName)15 Table (org.apache.hadoop.hbase.client.Table)15 ArrayList (java.util.ArrayList)14 Cell (org.apache.hadoop.hbase.Cell)11 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)11 CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)9 Mutation (org.apache.hadoop.hbase.client.Mutation)9 RowMutations (org.apache.hadoop.hbase.client.RowMutations)9 List (java.util.List)8 Map (java.util.Map)8 Scan (org.apache.hadoop.hbase.client.Scan)7 KeyValue (org.apache.hadoop.hbase.KeyValue)5