Usage of org.apache.hadoop.hbase.client.Increment in the project storm-hbase (by jrkinley):
the method getIncrementFromTuple of class TupleTableConfig.
/**
 * Creates an HBase {@link Increment} from a Storm {@link Tuple}.
 * <p>
 * The row key is read from the tuple field named by {@code tupleRowKeyField}. For each
 * configured column family/qualifier pair, the counter qualifier is taken from the tuple
 * field with the qualifier's name; if no such tuple field exists, the qualifier string
 * itself is used as the counter qualifier.
 *
 * @param tuple The {@link Tuple} supplying the row key (and, optionally, qualifier values)
 * @param increment The amount to increment each counter by
 * @return the populated {@link Increment}
 */
public Increment getIncrementFromTuple(final Tuple tuple, final long increment) {
  byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));
  Increment inc = new Increment(rowKey);
  inc.setWriteToWAL(writeToWAL);
  if (!columnFamilies.isEmpty()) {
    for (String cf : columnFamilies.keySet()) {
      byte[] cfBytes = Bytes.toBytes(cf);
      for (String cq : columnFamilies.get(cf)) {
        byte[] val;
        try {
          // Prefer the tuple's value for this qualifier name as the counter qualifier
          val = Bytes.toBytes(tuple.getStringByField(cq));
        } catch (IllegalArgumentException ex) {
          // cq is not a tuple field: fall back to the qualifier name itself
          val = Bytes.toBytes(cq);
        }
        inc.addColumn(cfBytes, val, increment);
      }
    }
  }
  return inc;
}
Usage of org.apache.hadoop.hbase.client.Increment in the project storm-hbase (by jrkinley):
the method testAddIncrement of class TestSerialisation.
@Test
public void testAddIncrement() {
  Increment i = new Increment(KEY);
  // Set the CF:CQ1 counter amount to 1
  i.addColumn(CF, CQ1, 1);
  // addColumn overwrites any existing amount for the same qualifier, so it is still 1
  i.addColumn(CF, CQ1, 1);
  Assert.assertEquals(1L, (long) i.getFamilyMap().get(CF).get(CQ1));
  // addIncrement adds to the existing amount: CQ1 goes from 1 to 3
  TupleTableConfig.addIncrement(i, CF, CQ1, 2L);
  // Incrementing a different qualifier (CQ2) creates it with amount 2
  TupleTableConfig.addIncrement(i, CF, CQ2, 2L);
  Assert.assertEquals(3L, (long) i.getFamilyMap().get(CF).get(CQ1));
  Assert.assertEquals(2L, (long) i.getFamilyMap().get(CF).get(CQ2));
}
Usage of org.apache.hadoop.hbase.client.Increment in the project storm-hbase (by jrkinley):
the method execute of class HBaseCountersBatchBolt.
/**
* {@inheritDoc}
*/
/**
 * {@inheritDoc}
 * <p>
 * Converts the tuple into an {@link Increment} and merges it into the in-memory
 * {@code counters} map (keyed by row key), so multiple tuples for the same row
 * within one batch collapse into a single Increment.
 */
@Override
public void execute(Tuple tuple) {
  Increment newInc = conf.getIncrementFromTuple(tuple, TupleTableConfig.DEFAULT_INCREMENT);
  // NOTE(review): the key here is a raw byte[], which uses identity equals/hashCode;
  // this lookup only behaves correctly if counters is a sorted map built with
  // Bytes.BYTES_COMPARATOR (e.g. TreeMap) — confirm the field's declaration.
  Increment extInc = counters.get(newInc.getRow());
  if (extInc != null) {
    // An Increment already exists for this row: fold the new amounts into it,
    // family by family, qualifier by qualifier
    for (Entry<byte[], NavigableMap<byte[], Long>> families : newInc.getFamilyMap().entrySet()) {
      for (Entry<byte[], Long> columns : families.getValue().entrySet()) {
        TupleTableConfig.addIncrement(extInc, families.getKey(), columns.getKey(), columns.getValue());
      }
    }
    counters.put(newInc.getRow(), extInc);
  } else {
    // First Increment seen for this row in the current batch
    counters.put(newInc.getRow(), newInc);
  }
}
Usage of org.apache.hadoop.hbase.client.Increment in the project storm-hbase (by jrkinley):
the method finishBatch of class HBaseCountersBatchBolt.
/**
* {@inheritDoc}
*/
/**
 * {@inheritDoc}
 * <p>
 * Flushes the batched counters to HBase idempotently: for each counter, the last
 * transaction id that updated it is stored alongside it in the table, and the counter
 * is only incremented when the stored txid differs from the current attempt's txid.
 * This makes a replayed batch a no-op for counters it already applied.
 */
@Override
public void finishBatch() {
  // NOTE(review): a new HTableConnector is created per batch and never closed in this
  // method — confirm it is closed elsewhere, otherwise this leaks connections.
  try {
    connector = new HTableConnector(conf);
  } catch (IOException ex) {
    throw new RuntimeException(ex);
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Finishing tx: " + attempt.getTransactionId());
    LOG.debug(String.format("Updating idempotent counters for %d rows in table '%s'", counters.size(), conf.getTableName()));
  }
  for (Increment inc : counters.values()) {
    for (Entry<byte[], NavigableMap<byte[], Long>> e : inc.getFamilyMap().entrySet()) {
      for (Entry<byte[], Long> c : e.getValue().entrySet()) {
        // Read the txid that last updated this counter from its companion qualifier
        byte[] txidCQ = txidQualifier(c.getKey());
        BigInteger latestTxid = getLatestTxid(inc.getRow(), e.getKey(), txidCQ);
        long counter = c.getValue();
        // NOTE(review): assumes attempt.getTransactionId() is comparable to BigInteger
        // via equals — verify the txid type, since BigInteger.equals(Long) is always false.
        if (latestTxid == null || !latestTxid.equals(attempt.getTransactionId())) {
          // txids differ (or no txid stored yet), so it is safe to apply the increment
          try {
            counter = connector.getTable().incrementColumnValue(inc.getRow(), e.getKey(), c.getKey(), c.getValue(), conf.isWriteToWAL());
          } catch (IOException ex) {
            throw new RuntimeException(String.format("Unable to increment counter: %s, %s, %s", Bytes.toString(inc.getRow()), Bytes.toString(e.getKey()), Bytes.toString(c.getKey())), ex);
          }
          // Record this attempt's txid so a replay of the same batch skips this counter
          putLatestTxid(inc.getRow(), e.getKey(), txidCQ);
          collector.emit(new Values(inc.getRow(), e.getKey(), c.getKey(), counter));
          if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("txids for counter %s, %s, %s are different [%d, %d], incrementing", Bytes.toString(inc.getRow()), Bytes.toString(e.getKey()), Bytes.toString(c.getKey()), latestTxid, attempt.getTransactionId()));
          }
        } else {
          // Same txid: this batch was already applied to this counter; skip it
          if (LOG.isDebugEnabled()) {
            LOG.debug(String.format("txids for counter %s, %s, %s are the same [%d], skipping", Bytes.toString(inc.getRow()), Bytes.toString(e.getKey()), Bytes.toString(c.getKey()), latestTxid));
          }
        }
      }
    }
  }
}
Usage of org.apache.hadoop.hbase.client.Increment in the project metron (by apache):
the method addMutation of class HBaseClient.
/**
 * Queues a Mutation such as a Put or Increment for the batch. The Mutation is only
 * recorded here; nothing is sent to HBase until the batch is executed.
 *
 * @param rowKey The row key of the Mutation.
 * @param cols The columns affected by the Mutation.
 * @param durability The durability of the mutation.
 */
public void addMutation(byte[] rowKey, ColumnList cols, Durability durability) {
  if (cols.hasColumns()) {
    mutations.add(createPut(rowKey, cols, durability));
  }
  if (cols.hasCounters()) {
    mutations.add(createIncrement(rowKey, cols, durability));
  }
  // If nothing has been queued at all, queue an empty Put for the row so the
  // pending batch is never left empty
  if (mutations.isEmpty()) {
    mutations.add(new Put(rowKey));
  }
}
Aggregations