Use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
From the class DefaultBulkUpdaterTest, method singleTableConcurrent:
@Test
public void singleTableConcurrent() throws Exception {
    // Given
    TableName tableA = TableName.valueOf("A");
    TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
    TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
    List<TestData> testDatas = new ArrayList<>();
    testDatas.addAll(testDataSetA_0_0.getTestDatas());
    testDatas.addAll(testDataSetA_0_1.getTestDatas());
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, completeLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, completeLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    verifier.verify(testDataSetA_0_0);
    verifier.verify(testDataSetA_0_1);
}
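The Incrementer and Flusher helpers used above live in BulkIncrementerTestClazz and are not shown on this page. As a rough sketch of what the Incrementer side might look like (the BulkIncrementer#increment signature and the TestData accessors are assumptions for illustration, not the actual pinpoint API):

class Incrementer implements Runnable {

    private final BulkIncrementer bulkIncrementer;
    private final CountDownLatch completeLatch;
    private final List<TestData> testDatas;

    Incrementer(BulkIncrementer bulkIncrementer, CountDownLatch completeLatch, List<TestData> testDatas) {
        this.bulkIncrementer = bulkIncrementer;
        this.completeLatch = completeLatch;
        this.testDatas = testDatas;
    }

    @Override
    public void run() {
        // Feed this thread's partition into the shared BulkIncrementer.
        // The method name and parameters are assumed here for illustration.
        for (TestData testData : testDatas) {
            bulkIncrementer.increment(testData.getTableName(), testData.getRowKey(), testData.getColumnName());
        }
        // Signal the Flusher that this partition has been fully applied.
        completeLatch.countDown();
    }
}

Each test starts one such worker thread per partition, so the shared BulkIncrementer is exercised by many writers concurrently while the Flusher waits on the latch.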
Use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
From the class DefaultBulkUpdaterTest, method multipleTablesConcurrent:
@Test
public void multipleTablesConcurrent() throws Exception {
    // Given
    final int numTables = 50;
    final int numRowIds = 100;
    final int numColumnIds = 20;
    final int maxCallCount = 200;
    List<TestDataSet> testDataSets = BulkIncrementerTestClazz.createRandomTestDataSetList(numTables, numRowIds, numColumnIds, maxCallCount);
    List<TableName> tableNames = new ArrayList<>(numTables);
    for (int i = 0; i < numTables; i++) {
        tableNames.add(TableName.valueOf(i + ""));
    }
    final int maxNumTestDatas = numTables * numRowIds * numColumnIds * maxCallCount;
    List<TestData> testDatas = new ArrayList<>(maxNumTestDatas);
    for (TestDataSet testDataSet : testDataSets) {
        testDatas.addAll(testDataSet.getTestDatas());
    }
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch incrementorLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, incrementorLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, incrementorLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    for (TestDataSet testDataSet : testDataSets) {
        verifier.verify(testDataSet);
    }
}
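The Flusher is the Callable wrapped in the FutureTask in both tests. A minimal sketch of its likely shape, assuming a BulkIncrementer#getIncrements(rowKeyDistributor) accessor and a RowKeyDistributorByHashPrefix field (the real class in BulkIncrementerTestClazz may differ in these details):

class Flusher implements Callable<Map<TableName, List<Increment>>> {

    private final BulkIncrementer bulkIncrementer;
    private final RowKeyDistributorByHashPrefix rowKeyDistributor;
    private final CountDownLatch incrementerLatch;
    private final CountDownLatch flusherLatch;

    Flusher(BulkIncrementer bulkIncrementer, RowKeyDistributorByHashPrefix rowKeyDistributor,
            CountDownLatch incrementerLatch, CountDownLatch flusherLatch) {
        this.bulkIncrementer = bulkIncrementer;
        this.rowKeyDistributor = rowKeyDistributor;
        this.incrementerLatch = incrementerLatch;
        this.flusherLatch = flusherLatch;
    }

    @Override
    public Map<TableName, List<Increment>> call() throws Exception {
        // Wait until every Incrementer thread has drained its partition.
        incrementerLatch.await(30L, TimeUnit.SECONDS);
        // Drain the accumulated counters into per-table Increment lists.
        // The accessor name and the distributor type are assumptions for illustration.
        Map<TableName, List<Increment>> increments = bulkIncrementer.getIncrements(rowKeyDistributor);
        // Unblock the test thread so it can fetch the FutureTask result and verify.
        flusherLatch.countDown();
        return increments;
    }
}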
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
From the class HRegion, method doMiniBatchMutate:
/**
 * Called to do a piece of the batch that came in to {@link #batchMutate(Mutation[])}.
 * In here we also handle replay of edits on region recovery. Also gets the change in size
 * brought about by applying {@code batchOp}.
 */
private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
    boolean success = false;
    WALEdit walEdit = null;
    WriteEntry writeEntry = null;
    boolean locked = false;
    // We try to set up a batch in the range [batchOp.nextIndexToProcess, lastIndexExclusive)
    MiniBatchOperationInProgress<Mutation> miniBatchOp = null;
    /** Keep track of the locks we hold so we can release them in finally clause */
    List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size());
    // Check for thread interrupt status in case we have been signaled from
    // #interruptRegionOperation.
    checkInterrupt();
    try {
        // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with
        // locked rows
        miniBatchOp = batchOp.lockRowsAndBuildMiniBatch(acquiredRowLocks);
        // Ensure we acquire at least one.
        if (miniBatchOp.getReadyToWriteCount() <= 0) {
            // NoSuchColumnFamily?
            return;
        }
        // Check for thread interrupt status in case we have been signaled from
        // #interruptRegionOperation. Do it before we take the lock and disable interrupts for
        // the WAL append.
        checkInterrupt();
        lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount());
        locked = true;
        // From this point until memstore update this operation should not be interrupted.
        disableInterrupts();
        // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp
        // We should record the timestamp only after we have acquired the rowLock,
        // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer
        // timestamp
        long now = EnvironmentEdgeManager.currentTime();
        batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks);
        // STEP 3. Build WAL edit
        List<Pair<NonceKey, WALEdit>> walEdits = batchOp.buildWALEdits(miniBatchOp);
        // STEP 4. Append the WALEdits to the WAL
        for (Iterator<Pair<NonceKey, WALEdit>> it = walEdits.iterator(); it.hasNext(); ) {
            Pair<NonceKey, WALEdit> nonceKeyWALEditPair = it.next();
            walEdit = nonceKeyWALEditPair.getSecond();
            NonceKey nonceKey = nonceKeyWALEditPair.getFirst();
            if (walEdit != null && !walEdit.isEmpty()) {
                writeEntry = doWALAppend(walEdit, batchOp.durability, batchOp.getClusterIds(), now, nonceKey.getNonceGroup(), nonceKey.getNonce(), batchOp.getOrigLogSeqNum());
            }
            // Complete mvcc for all but last writeEntry (for replay case)
            if (it.hasNext() && writeEntry != null) {
                mvcc.complete(writeEntry);
                writeEntry = null;
            }
        }
        // STEP 5. Write back to memStore
        // NOTE: writeEntry can be null here
        writeEntry = batchOp.writeMiniBatchOperationsToMemStore(miniBatchOp, writeEntry);
        // STEP 6. Complete MiniBatchOperations: If required calls postBatchMutate() CP hook and
        // complete mvcc for last writeEntry
        batchOp.completeMiniBatchOperations(miniBatchOp, writeEntry);
        writeEntry = null;
        success = true;
    } finally {
        // Call complete rather than completeAndWait because we probably had error if walKey != null
        if (writeEntry != null) {
            mvcc.complete(writeEntry);
        }
        if (locked) {
            this.updatesLock.readLock().unlock();
        }
        releaseRowLocks(acquiredRowLocks);
        enableInterrupts();
        final int finalLastIndexExclusive = miniBatchOp != null ? miniBatchOp.getLastIndexExclusive() : batchOp.size();
        final boolean finalSuccess = success;
        batchOp.visitBatchOperations(true, finalLastIndexExclusive, (int i) -> {
            Mutation mutation = batchOp.getMutation(i);
            if (mutation instanceof Increment || mutation instanceof Append) {
                if (finalSuccess) {
                    batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.SUCCESS, batchOp.results[i]);
                } else {
                    batchOp.retCodeDetails[i] = OperationStatus.FAILURE;
                }
            } else {
                batchOp.retCodeDetails[i] = finalSuccess ? OperationStatus.SUCCESS : OperationStatus.FAILURE;
            }
            return true;
        });
        batchOp.doPostOpCleanupForMiniBatch(miniBatchOp, walEdit, finalSuccess);
        batchOp.nextIndexToProcess = finalLastIndexExclusive;
    }
}
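In recent HBase versions, client Increment and Append operations are executed through this batch-mutate path, which is why the finally block above special-cases them when filling in per-operation statuses. A minimal client-side sketch of submitting a mixed batch that includes an Increment (not from the HBase source; it assumes an existing Table handle named table, and the table, family, and qualifier names are placeholders):

List<Row> actions = new ArrayList<>();
actions.add(new Put(Bytes.toBytes("row-1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
actions.add(new Increment(Bytes.toBytes("row-1"))
        .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L));
Object[] results = new Object[actions.size()];
// Table.batch() sends the mixed mutations together; for the Increment the corresponding
// results[] element is a Result carrying the post-increment cell, which corresponds to the
// OperationStatus(SUCCESS, result) branch in the finally block above.
table.batch(actions, results);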
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
From the class TestPassCustomCellViaRegionObserver, method testMutation:
@Test
public void testMutation() throws Exception {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    byte[] value = VALUE;
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();
    Increment inc = new Increment(ROW);
    inc.addColumn(FAMILY, QUALIFIER, 10L);
    table.increment(inc);
    // QUALIFIER -> 10 (put) + 10 (increment)
    // QUALIFIER_FROM_CP -> 10 (from cp's put) + 10 (from cp's increment)
    value = Bytes.toBytes(20L);
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();
    Append append = new Append(ROW);
    append.addColumn(FAMILY, QUALIFIER, APPEND_VALUE);
    table.append(append);
    // 10L + "MB"
    value = ByteBuffer.wrap(new byte[value.length + APPEND_VALUE.length]).put(value).put(APPEND_VALUE).array();
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();
    Delete delete = new Delete(ROW);
    delete.addColumns(FAMILY, QUALIFIER);
    table.delete(delete);
    assertTrue(Arrays.asList(table.get(new Get(ROW)).rawCells()).toString(), table.get(new Get(ROW)).isEmpty());
    assertObserverHasExecuted();
    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put));
    assertObserverHasExecuted();
    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete));
    assertObserverHasExecuted();
    assertTrue(table.get(new Get(ROW)).isEmpty());
}
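The coprocessor this test installs, which also produces the QUALIFIER_FROM_CP cells referenced in the comments, is not shown on this page. As a simplified, hypothetical sketch of an observer in the same spirit, mirroring each incoming value into a second qualifier (the real observer builds custom Cell implementations, and FAMILY/QUALIFIER/QUALIFIER_FROM_CP here are placeholders; imports omitted for brevity, as in the snippets above):

public class MirrorCellObserver implements RegionCoprocessor, RegionObserver {

    private static final byte[] FAMILY = Bytes.toBytes("cf");
    private static final byte[] QUALIFIER = Bytes.toBytes("q");
    private static final byte[] QUALIFIER_FROM_CP = Bytes.toBytes("q_cp");

    @Override
    public Optional<RegionObserver> getRegionObserver() {
        return Optional.of(this);
    }

    @Override
    public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit,
            Durability durability) throws IOException {
        // Duplicate the incoming value into a coprocessor-owned qualifier.
        for (Cell cell : put.get(FAMILY, QUALIFIER)) {
            put.addColumn(FAMILY, QUALIFIER_FROM_CP, CellUtil.cloneValue(cell));
        }
    }

    @Override
    public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment)
            throws IOException {
        // Mirror the increment amount onto the coprocessor-owned qualifier as well.
        for (Cell cell : increment.get(FAMILY, QUALIFIER)) {
            increment.addColumn(FAMILY, QUALIFIER_FROM_CP, Bytes.toLong(CellUtil.cloneValue(cell)));
        }
        // Returning null lets the default increment path run with the modified Increment.
        return null;
    }
}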
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
From the class TestPostIncrementAndAppendBeforeWAL, method testIncrementTTLWithACLTag:
@Test
public void testIncrementTTLWithACLTag() throws Exception {
    TableName tableName = TableName.valueOf(name.getMethodName());
    createTableWithCoprocessor(tableName, ChangeCellWithACLTagObserver.class.getName());
    try (Table table = connection.getTable(tableName)) {
        // Increment without TTL
        Increment firstIncrement = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setACL(USER, PERMS);
        Result result = table.increment(firstIncrement);
        assertEquals(1, result.size());
        assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
        // Check if the new cell can be read
        Get get = new Get(ROW).addColumn(CF1_BYTES, CQ1);
        result = table.get(get);
        assertEquals(1, result.size());
        assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
        // Increment with TTL
        Increment secondIncrement = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setTTL(1000).setACL(USER, PERMS);
        result = table.increment(secondIncrement);
        // We should get value 2 here
        assertEquals(1, result.size());
        assertEquals(2, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
        // Wait 4s to let the second increment expire
        Thread.sleep(4000);
        get = new Get(ROW).addColumn(CF1_BYTES, CQ1);
        result = table.get(get);
        // The value should revert to 1 once the TTL'd cell written by the second increment expires
        assertEquals(1, result.size());
        assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
    }
}