use of org.apache.hadoop.hbase.client.Increment in project phoenix by apache.
the class ConnectionQueryServicesImpl method incrementSequenceValues.
@SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        Sequence newSequences = new Sequence(key);
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequences);
        if (sequence == null) {
            sequence = newSequences;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, increment the sequences
        List<Increment> incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
            } catch (EmptySequenceCacheException e) {
                indexes[toIncrementList.size()] = i;
                toIncrementList.add(sequence);
                Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
                incrementBatch.add(inc);
            } catch (SQLException e) {
                exceptions[i] = e;
            }
        }
        if (toIncrementList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(incrementBatch);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toIncrementList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                long numToAllocate = Bytes.toLong(incrementBatch.get(i).getAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE));
                values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
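For reference, a minimal standalone sketch of the same batching pattern, written against the newer Connection/Table API rather than the deprecated HTableInterface used above; the table name "demo", family "cf", and qualifier "counter" are placeholders, not anything from the Phoenix code.
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchIncrementSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("demo"))) {
            // Build one Increment per row, as the Phoenix code builds one per sequence.
            List<Increment> incrementBatch = new ArrayList<>();
            for (int i = 0; i < 3; i++) {
                Increment inc = new Increment(Bytes.toBytes("row-" + i));
                inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
                incrementBatch.add(inc);
            }
            // batch() fills the results array in the same order as the submitted actions,
            // which is why the Phoenix code can map resultObjects[i] back to toIncrementList.get(i).
            Object[] results = new Object[incrementBatch.size()];
            table.batch(incrementBatch, results);
            for (Object o : results) {
                Result r = (Result) o;
                System.out.println(Bytes.toLong(r.getValue(Bytes.toBytes("cf"), Bytes.toBytes("counter"))));
            }
        }
    }
}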
use of org.apache.hadoop.hbase.client.Increment in project cdap by caskdata.
the class HBaseMetricsTable method incrementAndGet.
@Override
public long incrementAndGet(byte[] row, byte[] column, long delta) {
    byte[] distributedKey = createDistributedRowKey(row);
    Increment increment = new Increment(distributedKey);
    increment.addColumn(columnFamily, column, delta);
    try {
        Result result = hTable.increment(increment);
        return Bytes.toLong(result.getValue(columnFamily, column));
    } catch (IOException e) {
        // currently there is no other way to extract that from the HBase exception than string matching
        if (e.getMessage() != null && e.getMessage().contains("isn't 64 bits wide")) {
            throw new NumberFormatException("Attempted to increment a value that is not convertible to long," + " row: " + Bytes.toStringBinary(distributedKey) + " column: " + Bytes.toStringBinary(column));
        }
        throw new DataSetException("IncrementAndGet failed on table " + tableId, e);
    }
}
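When a single cell is involved, the same read-back can be done in one call with Table.incrementColumnValue; a minimal sketch, reusing the hTable, columnFamily, and createDistributedRowKey names from the method above (the exception translation is omitted here):
// Sketch only: equivalent to building a one-column Increment and decoding the
// returned Result, but in a single convenience call.
private long incrementAndGetSimple(byte[] row, byte[] column, long delta) throws IOException {
    byte[] distributedKey = createDistributedRowKey(row);
    return hTable.incrementColumnValue(distributedKey, columnFamily, column, delta);
}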
use of org.apache.hadoop.hbase.client.Increment in project storm by apache.
the class HBaseClient method constructMutationReq.
public List<Mutation> constructMutationReq(byte[] rowKey, ColumnList cols, Durability durability) {
    List<Mutation> mutations = Lists.newArrayList();
    if (cols.hasColumns()) {
        Put put = new Put(rowKey);
        put.setDurability(durability);
        for (ColumnList.Column col : cols.getColumns()) {
            if (col.getTs() > 0) {
                put.addColumn(col.getFamily(), col.getQualifier(), col.getTs(), col.getValue());
            } else {
                put.addColumn(col.getFamily(), col.getQualifier(), col.getValue());
            }
        }
        mutations.add(put);
    }
    if (cols.hasCounters()) {
        Increment inc = new Increment(rowKey);
        inc.setDurability(durability);
        for (ColumnList.Counter cnt : cols.getCounters()) {
            inc.addColumn(cnt.getFamily(), cnt.getQualifier(), cnt.getIncrement());
        }
        mutations.add(inc);
    }
    if (cols.hasColumnsToDelete()) {
        Delete delete = new Delete(rowKey);
        delete.setDurability(durability);
        for (ColumnList.Column col : cols.getColumnsToDelete()) {
            if (col.getTs() > 0) {
                delete.addColumn(col.getFamily(), col.getQualifier(), col.getTs());
            } else {
                delete.addColumn(col.getFamily(), col.getQualifier());
            }
        }
        mutations.add(delete);
    }
    if (mutations.isEmpty()) {
        mutations.add(new Put(rowKey));
    }
    return mutations;
}
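Because Put, Increment, and Delete all extend Mutation (and implement Row), the mixed list returned here can be sent in a single round trip; an illustrative usage sketch, assuming an open Table named table and with exception handling omitted:
// Illustrative only: submit the mixed Put/Increment/Delete list in one batch call.
List<Mutation> mutations = constructMutationReq(rowKey, cols, Durability.SYNC_WAL);
Object[] results = new Object[mutations.size()];
table.batch(mutations, results);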
use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
the class RowKeyMerge method createIncrement.
private Increment createIncrement(Map.Entry<RowKey, List<ColumnName>> rowKeyEntry, RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
    RowKey rowKey = rowKeyEntry.getKey();
    byte[] key = getRowKey(rowKey, rowKeyDistributorByHashPrefix);
    final Increment increment = new Increment(key);
    for (ColumnName columnName : rowKeyEntry.getValue()) {
        increment.addColumn(family, columnName.getColumnName(), columnName.getCallCount());
    }
    logger.trace("create increment row:{}, column:{}", rowKey, rowKeyEntry.getValue());
    return increment;
}
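A usage sketch of an Increment built this way: several counters on one row are incremented atomically in a single call, and the returned Result carries each new value. The table, family, and qualifiers below are placeholders, not Pinpoint's schema.
// Illustrative only: two call counters on the same row, incremented atomically.
byte[] family = Bytes.toBytes("C");
Increment increment = new Increment(Bytes.toBytes("caller-row"));
increment.addColumn(family, Bytes.toBytes("callee-1"), 3L);
increment.addColumn(family, Bytes.toBytes("callee-2"), 1L);
Result result = table.increment(increment);
long callee1Count = Bytes.toLong(result.getValue(family, Bytes.toBytes("callee-1")));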
use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
the class SizeLimitedBulkIncrementerTest method singleTableConcurrent.
@Test
public void singleTableConcurrent() throws Exception {
    // Given
    TableName tableA = TableName.valueOf("A");
    TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
    TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
    List<TestData> testDatas = new ArrayList<>();
    testDatas.addAll(testDataSetA_0_0.getTestDatas());
    testDatas.addAll(testDataSetA_0_1.getTestDatas());
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<BulkIncrementerTestClazz.TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, completeLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, completeLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    verifier.verify(testDataSetA_0_0);
    verifier.verify(testDataSetA_0_1);
}
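For illustration only (this is not the project's TestVerifier), a sketch of how the flushed Map<TableName, List<Increment>> could be inspected, summing the pending amounts per table via Increment.getFamilyMapOfLongs; it assumes java.util.Map and java.util.NavigableMap are imported alongside the test's existing imports.
// Sketch only: total the pending increment amounts per table.
for (Map.Entry<TableName, List<Increment>> entry : incrementMap.entrySet()) {
    long total = 0;
    for (Increment increment : entry.getValue()) {
        for (NavigableMap<byte[], Long> qualifierAmounts : increment.getFamilyMapOfLongs().values()) {
            for (Long amount : qualifierAmounts.values()) {
                total += amount;
            }
        }
    }
    System.out.println(entry.getKey() + " total pending increments: " + total);
}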