
Example 51 with Increment

use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.

the class DefaultBulkUpdaterTest method singleTableConcurrent.

@Test
public void singleTableConcurrent() throws Exception {
    // Given
    TableName tableA = TableName.valueOf("A");
    TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
    TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
    List<TestData> testDatas = new ArrayList<>();
    testDatas.addAll(testDataSetA_0_0.getTestDatas());
    testDatas.addAll(testDataSetA_0_1.getTestDatas());
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<BulkIncrementerTestClazz.TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, completeLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, completeLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    verifier.verify(testDataSetA_0_0);
    verifier.verify(testDataSetA_0_1);
}
Also used : TestData(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestData) Incrementer(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Incrementer) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) TestDataSet(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestDataSet) TableName(org.apache.hadoop.hbase.TableName) FutureTask(java.util.concurrent.FutureTask) Increment(org.apache.hadoop.hbase.client.Increment) Flusher(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher) TestVerifier(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestVerifier) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) Test(org.junit.Test)
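
For reference, here is a minimal sketch (not part of the pinpoint test above) of how a flushed Map<TableName, List<Increment>> like the one verified by TestVerifier could be applied with the standard HBase client API. It assumes an already-open org.apache.hadoop.hbase.client.Connection named connection; the helper name is illustrative only.

// Hypothetical helper, not from the pinpoint sources: applies a flushed increment map to HBase.
// Requires: java.io.IOException, java.util.List, java.util.Map, org.apache.hadoop.hbase.TableName,
// org.apache.hadoop.hbase.client.Connection, Increment, Table
void flushIncrements(Connection connection, Map<TableName, List<Increment>> incrementMap) throws IOException, InterruptedException {
    for (Map.Entry<TableName, List<Increment>> entry : incrementMap.entrySet()) {
        try (Table table = connection.getTable(entry.getKey())) {
            // Table.batch applies every Increment for this table and fills one result slot per operation.
            Object[] results = new Object[entry.getValue().size()];
            table.batch(entry.getValue(), results);
        }
    }
}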

Example 52 with Increment

use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.

the class DefaultBulkUpdaterTest method multipleTablesConcurrent.

@Test
public void multipleTablesConcurrent() throws Exception {
    // Given
    final int numTables = 50;
    final int numRowIds = 100;
    final int numColumnIds = 20;
    final int maxCallCount = 200;
    List<TestDataSet> testDataSets = BulkIncrementerTestClazz.createRandomTestDataSetList(numTables, numRowIds, numColumnIds, maxCallCount);
    List<TableName> tableNames = new ArrayList<>(numTables);
    for (int i = 0; i < numTables; i++) {
        tableNames.add(TableName.valueOf(i + ""));
    }
    final int maxNumTestDatas = numTables * numRowIds * numColumnIds * maxCallCount;
    List<TestData> testDatas = new ArrayList<>(maxNumTestDatas);
    for (TestDataSet testDataSet : testDataSets) {
        testDatas.addAll(testDataSet.getTestDatas());
    }
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch incrementorLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, incrementorLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, incrementorLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    for (TestDataSet testDataSet : testDataSets) {
        verifier.verify(testDataSet);
    }
}
Also used : TestData(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestData) Incrementer(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Incrementer) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) TestDataSet(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestDataSet) TableName(org.apache.hadoop.hbase.TableName) FutureTask(java.util.concurrent.FutureTask) Increment(org.apache.hadoop.hbase.client.Increment) Flusher(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher) TestVerifier(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestVerifier) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) Test(org.junit.Test)

Example 53 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class HRegion method doMiniBatchMutate.

/**
 * Called to do a piece of the batch that came in to {@link #batchMutate(Mutation[])}.
 * Here we also handle replay of edits on region recovery, and we account for the change in size
 * brought about by applying {@code batchOp}.
 */
private void doMiniBatchMutate(BatchOperation<?> batchOp) throws IOException {
    boolean success = false;
    WALEdit walEdit = null;
    WriteEntry writeEntry = null;
    boolean locked = false;
    // We try to set up a batch in the range [batchOp.nextIndexToProcess,lastIndexExclusive)
    MiniBatchOperationInProgress<Mutation> miniBatchOp = null;
    /**
     * Keep track of the locks we hold so we can release them in finally clause
     */
    List<RowLock> acquiredRowLocks = Lists.newArrayListWithCapacity(batchOp.size());
    // Check for thread interrupt status in case we have been signaled from
    // #interruptRegionOperation.
    checkInterrupt();
    try {
        // STEP 1. Try to acquire as many locks as we can and build mini-batch of operations with
        // locked rows
        miniBatchOp = batchOp.lockRowsAndBuildMiniBatch(acquiredRowLocks);
        // Ensure we acquire at least one.
        if (miniBatchOp.getReadyToWriteCount() <= 0) {
            // NoSuchColumnFamily?
            return;
        }
        // Check for thread interrupt status in case we have been signaled from
        // #interruptRegionOperation. Do it before we take the lock and disable interrupts for
        // the WAL append.
        checkInterrupt();
        lock(this.updatesLock.readLock(), miniBatchOp.getReadyToWriteCount());
        locked = true;
        // From this point until memstore update this operation should not be interrupted.
        disableInterrupts();
        // STEP 2. Update mini batch of all operations in progress with LATEST_TIMESTAMP timestamp
        // We should record the timestamp only after we have acquired the rowLock,
        // otherwise, newer puts/deletes/increment/append are not guaranteed to have a newer
        // timestamp
        long now = EnvironmentEdgeManager.currentTime();
        batchOp.prepareMiniBatchOperations(miniBatchOp, now, acquiredRowLocks);
        // STEP 3. Build WAL edit
        List<Pair<NonceKey, WALEdit>> walEdits = batchOp.buildWALEdits(miniBatchOp);
        for (Iterator<Pair<NonceKey, WALEdit>> it = walEdits.iterator(); it.hasNext(); ) {
            Pair<NonceKey, WALEdit> nonceKeyWALEditPair = it.next();
            walEdit = nonceKeyWALEditPair.getSecond();
            NonceKey nonceKey = nonceKeyWALEditPair.getFirst();
            if (walEdit != null && !walEdit.isEmpty()) {
                writeEntry = doWALAppend(walEdit, batchOp.durability, batchOp.getClusterIds(), now, nonceKey.getNonceGroup(), nonceKey.getNonce(), batchOp.getOrigLogSeqNum());
            }
            // Complete mvcc for all but last writeEntry (for replay case)
            if (it.hasNext() && writeEntry != null) {
                mvcc.complete(writeEntry);
                writeEntry = null;
            }
        }
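        // Note: the WAL append itself (the step between STEP 3 and STEP 5) is the doWALAppend call
        // inside the loop above, performed only for non-empty WAL edits.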
        // STEP 5. Write back to memStore
        // NOTE: writeEntry can be null here
        writeEntry = batchOp.writeMiniBatchOperationsToMemStore(miniBatchOp, writeEntry);
        // STEP 6. Complete MiniBatchOperations: If required calls postBatchMutate() CP hook and
        // complete mvcc for last writeEntry
        batchOp.completeMiniBatchOperations(miniBatchOp, writeEntry);
        writeEntry = null;
        success = true;
    } finally {
        // Call complete rather than completeAndWait because we probably had error if walKey != null
        if (writeEntry != null)
            mvcc.complete(writeEntry);
        if (locked) {
            this.updatesLock.readLock().unlock();
        }
        releaseRowLocks(acquiredRowLocks);
        enableInterrupts();
        final int finalLastIndexExclusive = miniBatchOp != null ? miniBatchOp.getLastIndexExclusive() : batchOp.size();
        final boolean finalSuccess = success;
        batchOp.visitBatchOperations(true, finalLastIndexExclusive, (int i) -> {
            Mutation mutation = batchOp.getMutation(i);
            if (mutation instanceof Increment || mutation instanceof Append) {
                if (finalSuccess) {
                    batchOp.retCodeDetails[i] = new OperationStatus(OperationStatusCode.SUCCESS, batchOp.results[i]);
                } else {
                    batchOp.retCodeDetails[i] = OperationStatus.FAILURE;
                }
            } else {
                batchOp.retCodeDetails[i] = finalSuccess ? OperationStatus.SUCCESS : OperationStatus.FAILURE;
            }
            return true;
        });
        batchOp.doPostOpCleanupForMiniBatch(miniBatchOp, walEdit, finalSuccess);
        batchOp.nextIndexToProcess = finalLastIndexExclusive;
    }
}
Also used : WriteEntry(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry) Append(org.apache.hadoop.hbase.client.Append) WALEdit(org.apache.hadoop.hbase.wal.WALEdit) NonceKey(org.apache.hadoop.hbase.util.NonceKey) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) Pair(org.apache.hadoop.hbase.util.Pair)
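
For orientation, a hedged client-side sketch of the kind of mixed batch that doMiniBatchMutate services on the region server, containing both an Increment and an Append. This is not part of HRegion; the table name, family, and qualifiers are illustrative, and it assumes an open Connection named connection.

// Hypothetical client-side example, not from the HBase sources.
// Requires: java.util.ArrayList, java.util.List, org.apache.hadoop.hbase.TableName,
// org.apache.hadoop.hbase.client.Append, Connection, Increment, Result, Row, Table,
// org.apache.hadoop.hbase.util.Bytes
void mixedBatch(Connection connection) throws Exception {
    try (Table table = connection.getTable(TableName.valueOf("t"))) {
        List<Row> actions = new ArrayList<>();
        Increment inc = new Increment(Bytes.toBytes("row1"));
        inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("counter"), 1L);
        actions.add(inc);
        Append app = new Append(Bytes.toBytes("row1"));
        app.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("log"), Bytes.toBytes("entry"));
        actions.add(app);
        // One result slot per action; Increment and Append return their updated cells as a Result.
        Object[] results = new Object[actions.size()];
        table.batch(actions, results);
        for (Object r : results) {
            if (r instanceof Result) {
                System.out.println("cells updated: " + ((Result) r).size());
            }
        }
    }
}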

Example 54 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class TestPassCustomCellViaRegionObserver method testMutation.

@Test
public void testMutation() throws Exception {
    Put put = new Put(ROW);
    put.addColumn(FAMILY, QUALIFIER, VALUE);
    table.put(put);
    byte[] value = VALUE;
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();
    Increment inc = new Increment(ROW);
    inc.addColumn(FAMILY, QUALIFIER, 10L);
    table.increment(inc);
    // QUALIFIER -> 10 (put) + 10 (increment)
    // QUALIFIER_FROM_CP -> 10 (from cp's put) + 10 (from cp's increment)
    value = Bytes.toBytes(20L);
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();
    Append append = new Append(ROW);
    append.addColumn(FAMILY, QUALIFIER, APPEND_VALUE);
    table.append(append);
    // 10L + "MB"
    value = ByteBuffer.wrap(new byte[value.length + APPEND_VALUE.length]).put(value).put(APPEND_VALUE).array();
    assertResult(table.get(new Get(ROW)), value, value);
    assertObserverHasExecuted();
    Delete delete = new Delete(ROW);
    delete.addColumns(FAMILY, QUALIFIER);
    table.delete(delete);
    assertTrue(Arrays.asList(table.get(new Get(ROW)).rawCells()).toString(), table.get(new Get(ROW)).isEmpty());
    assertObserverHasExecuted();
    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifNotExists().thenPut(put));
    assertObserverHasExecuted();
    assertTrue(table.checkAndMutate(ROW, FAMILY).qualifier(QUALIFIER).ifEquals(VALUE).thenDelete(delete));
    assertObserverHasExecuted();
    assertTrue(table.get(new Get(ROW)).isEmpty());
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Append(org.apache.hadoop.hbase.client.Append) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 55 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class TestPostIncrementAndAppendBeforeWAL method testIncrementTTLWithACLTag.

@Test
public void testIncrementTTLWithACLTag() throws Exception {
    TableName tableName = TableName.valueOf(name.getMethodName());
    createTableWithCoprocessor(tableName, ChangeCellWithACLTagObserver.class.getName());
    try (Table table = connection.getTable(tableName)) {
        // Increment without TTL
        Increment firstIncrement = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setACL(USER, PERMS);
        Result result = table.increment(firstIncrement);
        assertEquals(1, result.size());
        assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
        // Check if the new cell can be read
        Get get = new Get(ROW).addColumn(CF1_BYTES, CQ1);
        result = table.get(get);
        assertEquals(1, result.size());
        assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
        // Increment with TTL
        Increment secondIncrement = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1).setTTL(1000).setACL(USER, PERMS);
        result = table.increment(secondIncrement);
        // We should get value 2 here
        assertEquals(1, result.size());
        assertEquals(2, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
        // Wait 4s to let the second increment expire
        Thread.sleep(4000);
        get = new Get(ROW).addColumn(CF1_BYTES, CQ1);
        result = table.get(get);
        // The value should revert to 1
        assertEquals(1, result.size());
        assertEquals(1, Bytes.toLong(result.getValue(CF1_BYTES, CQ1)));
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) Increment(org.apache.hadoop.hbase.client.Increment) Get(org.apache.hadoop.hbase.client.Get) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment): 81
Test (org.junit.Test): 42
Put (org.apache.hadoop.hbase.client.Put): 31
Append (org.apache.hadoop.hbase.client.Append): 25
Result (org.apache.hadoop.hbase.client.Result): 25
Delete (org.apache.hadoop.hbase.client.Delete): 21
Get (org.apache.hadoop.hbase.client.Get): 19
IOException (java.io.IOException): 16
TableName (org.apache.hadoop.hbase.TableName): 15
Table (org.apache.hadoop.hbase.client.Table): 15
ArrayList (java.util.ArrayList): 14
Cell (org.apache.hadoop.hbase.Cell): 11
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 11
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 9
Mutation (org.apache.hadoop.hbase.client.Mutation): 9
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 9
List (java.util.List): 8
Map (java.util.Map): 8
Scan (org.apache.hadoop.hbase.client.Scan): 7
KeyValue (org.apache.hadoop.hbase.KeyValue): 5