Example 46 with Increment

use of org.apache.hadoop.hbase.client.Increment in project phoenix by apache.

the class ConnectionQueryServicesImpl method incrementSequenceValues.

@SuppressWarnings("deprecation")
private void incrementSequenceValues(List<SequenceAllocation> sequenceAllocations, long timestamp, long[] values, SQLException[] exceptions, Sequence.ValueOp op) throws SQLException {
    List<Sequence> sequences = Lists.newArrayListWithExpectedSize(sequenceAllocations.size());
    for (SequenceAllocation sequenceAllocation : sequenceAllocations) {
        SequenceKey key = sequenceAllocation.getSequenceKey();
        Sequence newSequence = new Sequence(key);
        // Register atomically; if another thread already inserted this key, reuse its entry.
        Sequence sequence = sequenceMap.putIfAbsent(key, newSequence);
        if (sequence == null) {
            sequence = newSequence;
        }
        sequences.add(sequence);
    }
    try {
        for (Sequence sequence : sequences) {
            sequence.getLock().lock();
        }
        // Now that we have all the locks we need, increment the sequences
        List<Increment> incrementBatch = Lists.newArrayListWithExpectedSize(sequences.size());
        List<Sequence> toIncrementList = Lists.newArrayListWithExpectedSize(sequences.size());
        int[] indexes = new int[sequences.size()];
        for (int i = 0; i < sequences.size(); i++) {
            Sequence sequence = sequences.get(i);
            try {
                values[i] = sequence.incrementValue(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
            } catch (EmptySequenceCacheException e) {
                // Local cache exhausted: queue a server-side Increment and record the
                // original index so the batched result can be mapped back to values[i].
                indexes[toIncrementList.size()] = i;
                toIncrementList.add(sequence);
                Increment inc = sequence.newIncrement(timestamp, op, sequenceAllocations.get(i).getNumAllocations());
                incrementBatch.add(inc);
            } catch (SQLException e) {
                exceptions[i] = e;
            }
        }
        if (toIncrementList.isEmpty()) {
            return;
        }
        HTableInterface hTable = this.getTable(SchemaUtil.getPhysicalName(PhoenixDatabaseMetaData.SYSTEM_SEQUENCE_NAME_BYTES, this.getProps()).getName());
        Object[] resultObjects = null;
        SQLException sqlE = null;
        try {
            resultObjects = hTable.batch(incrementBatch);
        } catch (IOException e) {
            sqlE = ServerUtil.parseServerException(e);
        } catch (InterruptedException e) {
            // restore the interrupt status
            Thread.currentThread().interrupt();
            sqlE = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e).build().buildException();
        } finally {
            try {
                hTable.close();
            } catch (IOException e) {
                if (sqlE == null) {
                    sqlE = ServerUtil.parseServerException(e);
                } else {
                    sqlE.setNextException(ServerUtil.parseServerException(e));
                }
            }
            if (sqlE != null) {
                throw sqlE;
            }
        }
        for (int i = 0; i < resultObjects.length; i++) {
            Sequence sequence = toIncrementList.get(i);
            Result result = (Result) resultObjects[i];
            try {
                // Recover the allocation count that was attached to the batched Increment as an attribute.
                long numToAllocate = Bytes.toLong(incrementBatch.get(i).getAttribute(SequenceRegionObserver.NUM_TO_ALLOCATE));
                values[indexes[i]] = sequence.incrementValue(result, op, numToAllocate);
            } catch (SQLException e) {
                exceptions[indexes[i]] = e;
            }
        }
    } finally {
        for (Sequence sequence : sequences) {
            sequence.getLock().unlock();
        }
    }
}
Also used : EmptySequenceCacheException(org.apache.phoenix.schema.EmptySequenceCacheException) SQLException(java.sql.SQLException) Sequence(org.apache.phoenix.schema.Sequence) SequenceAllocation(org.apache.phoenix.schema.SequenceAllocation) IOException(java.io.IOException) PhoenixIOException(org.apache.phoenix.exception.PhoenixIOException) HTableInterface(org.apache.hadoop.hbase.client.HTableInterface) PTinyint(org.apache.phoenix.schema.types.PTinyint) PUnsignedTinyint(org.apache.phoenix.schema.types.PUnsignedTinyint) MultiRowMutationEndpoint(org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint) MetaDataMutationResult(org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult) Result(org.apache.hadoop.hbase.client.Result) SequenceKey(org.apache.phoenix.schema.SequenceKey) Increment(org.apache.hadoop.hbase.client.Increment) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo)
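
The core HBase pattern here, once Phoenix's sequence bookkeeping is stripped away, is batching several Increment operations into a single RPC round and reading the post-increment values back from the returned Results. A minimal standalone sketch of that pattern using the non-deprecated Table API (the table, family, and qualifier names are hypothetical):

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchIncrementSketch {
    public static void main(String[] args) throws Exception {
        byte[] cf = Bytes.toBytes("cf");
        byte[] qualifier = Bytes.toBytes("count");
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("counters"))) { // hypothetical table
            List<Increment> batch = new ArrayList<>();
            for (int i = 0; i < 3; i++) {
                Increment inc = new Increment(Bytes.toBytes("row-" + i));
                inc.addColumn(cf, qualifier, 1L);
                batch.add(inc);
            }
            // One round trip; on success each slot holds a Result with the new counter value.
            Object[] results = new Object[batch.size()];
            table.batch(batch, results);
            for (Object r : results) {
                System.out.println(Bytes.toLong(((Result) r).getValue(cf, qualifier)));
            }
        }
    }
}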

Example 47 with Increment

use of org.apache.hadoop.hbase.client.Increment in project cdap by caskdata.

the class HBaseMetricsTable method incrementAndGet.

@Override
public long incrementAndGet(byte[] row, byte[] column, long delta) {
    byte[] distributedKey = createDistributedRowKey(row);
    Increment increment = new Increment(distributedKey);
    increment.addColumn(columnFamily, column, delta);
    try {
        Result result = hTable.increment(increment);
        return Bytes.toLong(result.getValue(columnFamily, column));
    } catch (IOException e) {
        // there is currently no other way to detect this from the HBase exception than matching on the message string
        if (e.getMessage() != null && e.getMessage().contains("isn't 64 bits wide")) {
            throw new NumberFormatException("Attempted to increment a value that is not convertible to long," + " row: " + Bytes.toStringBinary(distributedKey) + " column: " + Bytes.toStringBinary(column));
        }
        throw new DataSetException("IncrementAndGet failed on table " + tableId, e);
    }
}
Also used : DataSetException(co.cask.cdap.api.dataset.DataSetException) Increment(org.apache.hadoop.hbase.client.Increment) IOException(java.io.IOException) Result(org.apache.hadoop.hbase.client.Result)
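
When only a single counter cell is touched, as in this method, Table also exposes a convenience call that skips constructing an Increment object. A minimal sketch, assuming an open Table handle named table and hypothetical row/column names:

// incrementColumnValue wraps the same server-side atomic increment and
// returns the post-increment value directly as a long.
long updated = table.incrementColumnValue(
        Bytes.toBytes("row-1"),  // row key
        Bytes.toBytes("cf"),     // column family
        Bytes.toBytes("count"),  // qualifier
        1L);                     // delta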

Example 48 with Increment

use of org.apache.hadoop.hbase.client.Increment in project storm by apache.

the class HBaseClient method constructMutationReq.

public List<Mutation> constructMutationReq(byte[] rowKey, ColumnList cols, Durability durability) {
    List<Mutation> mutations = Lists.newArrayList();
    if (cols.hasColumns()) {
        Put put = new Put(rowKey);
        put.setDurability(durability);
        for (ColumnList.Column col : cols.getColumns()) {
            if (col.getTs() > 0) {
                put.addColumn(col.getFamily(), col.getQualifier(), col.getTs(), col.getValue());
            } else {
                put.addColumn(col.getFamily(), col.getQualifier(), col.getValue());
            }
        }
        mutations.add(put);
    }
    if (cols.hasCounters()) {
        Increment inc = new Increment(rowKey);
        inc.setDurability(durability);
        for (ColumnList.Counter cnt : cols.getCounters()) {
            inc.addColumn(cnt.getFamily(), cnt.getQualifier(), cnt.getIncrement());
        }
        mutations.add(inc);
    }
    if (cols.hasColumnsToDelete()) {
        Delete delete = new Delete(rowKey);
        delete.setDurability(durability);
        for (ColumnList.Column col : cols.getColumnsToDelete()) {
            if (col.getTs() > 0) {
                delete.addColumn(col.getFamily(), col.getQualifier(), col.getTs());
            } else {
                delete.addColumn(col.getFamily(), col.getQualifier());
            }
        }
        mutations.add(delete);
    }
    if (mutations.isEmpty()) {
        mutations.add(new Put(rowKey));
    }
    return mutations;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put)
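
Note that the chosen Durability is applied uniformly to every mutation in the list, Increment included. A minimal sketch of that knob in isolation, with hypothetical names; skipping the write-ahead log trades durability for throughput:

// SKIP_WAL: the increment bypasses the write-ahead log, so it is faster but
// can be lost if the region server dies before the memstore is flushed.
Increment inc = new Increment(Bytes.toBytes("row-1"));
inc.setDurability(Durability.SKIP_WAL);
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("hits"), 1L);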

Example 49 with Increment

use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.

the class RowKeyMerge method createIncrement.

private Increment createIncrement(Map.Entry<RowKey, List<ColumnName>> rowKeyEntry, RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
    RowKey rowKey = rowKeyEntry.getKey();
    byte[] key = getRowKey(rowKey, rowKeyDistributorByHashPrefix);
    final Increment increment = new Increment(key);
    for (ColumnName columnName : rowKeyEntry.getValue()) {
        increment.addColumn(family, columnName.getColumnName(), columnName.getCallCount());
    }
    logger.trace("create increment row:{}, column:{}", rowKey, rowKeyEntry.getValue());
    return increment;
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment)
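
Because one Increment can carry many column updates for the same row, the merge step above collapses every counter sharing a row key into a single object, which HBase then applies atomically in one RPC. A minimal sketch of a multi-column Increment, assuming an open Table handle named table and hypothetical names:

// Both counters live in the same row, so one Increment bumps them atomically.
Increment inc = new Increment(Bytes.toBytes("caller->callee"));
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("callCount"), 5L);
inc.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("errorCount"), 1L);
table.increment(inc);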

Example 50 with Increment

use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.

the class SizeLimitedBulkIncrementerTest method singleTableConcurrent.

@Test
public void singleTableConcurrent() throws Exception {
    // Given
    TableName tableA = TableName.valueOf("A");
    TestDataSet testDataSetA_0_0 = new TestDataSet(tableA, 0, 0, 1000000);
    TestDataSet testDataSetA_0_1 = new TestDataSet(tableA, 0, 1, 1000001);
    List<TestData> testDatas = new ArrayList<>();
    testDatas.addAll(testDataSetA_0_0.getTestDatas());
    testDatas.addAll(testDataSetA_0_1.getTestDatas());
    Collections.shuffle(testDatas);
    // When
    final int numIncrementers = 16;
    List<List<BulkIncrementerTestClazz.TestData>> testDataPartitions = ListUtils.partition(testDatas, testDatas.size() / (numIncrementers - 1));
    final CountDownLatch completeLatch = new CountDownLatch(testDataPartitions.size());
    final CountDownLatch flusherLatch = new CountDownLatch(1);
    FutureTask<Map<TableName, List<Increment>>> flushTask = new FutureTask<>(new Flusher(bulkIncrementer, rowKeyDistributor, completeLatch, flusherLatch));
    new Thread(flushTask, "Flusher").start();
    int counter = 0;
    for (List<TestData> testDataPartition : testDataPartitions) {
        Incrementer incrementer = new Incrementer(bulkIncrementer, completeLatch, testDataPartition);
        new Thread(incrementer, "Incrementer-" + counter++).start();
    }
    flusherLatch.await(30L, TimeUnit.SECONDS);
    // Then
    Map<TableName, List<Increment>> incrementMap = flushTask.get(5L, TimeUnit.SECONDS);
    TestVerifier verifier = new TestVerifier(incrementMap);
    verifier.verify(testDataSetA_0_0);
    verifier.verify(testDataSetA_0_1);
}
Also used : TestData(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestData) Incrementer(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Incrementer) ArrayList(java.util.ArrayList) CountDownLatch(java.util.concurrent.CountDownLatch) TestDataSet(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestDataSet) TableName(org.apache.hadoop.hbase.TableName) FutureTask(java.util.concurrent.FutureTask) Increment(org.apache.hadoop.hbase.client.Increment) Flusher(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.Flusher) TestVerifier(com.navercorp.pinpoint.collector.dao.hbase.statistics.BulkIncrementerTestClazz.TestVerifier) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) Test(org.junit.Test)
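
The coordination shape of this test, worker threads counting down a shared latch while a single flusher waits for all of them before producing the final result, is plain java.util.concurrent. A stripped-down, self-contained sketch of that shape (names are illustrative, not from the Pinpoint code):

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.FutureTask;
import java.util.concurrent.atomic.LongAdder;

public class LatchShapeSketch {
    public static void main(String[] args) throws Exception {
        final int workers = 4;
        final CountDownLatch done = new CountDownLatch(workers);
        final LongAdder total = new LongAdder();
        // The "flusher" waits until every worker has finished, then reads the total.
        FutureTask<Long> flushTask = new FutureTask<>(() -> {
            done.await();
            return total.sum();
        });
        new Thread(flushTask, "Flusher").start();
        for (int i = 0; i < workers; i++) {
            new Thread(() -> {
                total.add(1_000L); // stand-in for the bulk increments
                done.countDown();  // signal completion
            }, "Worker-" + i).start();
        }
        System.out.println("flushed total: " + flushTask.get());
    }
}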

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment) 81
Test (org.junit.Test) 42
Put (org.apache.hadoop.hbase.client.Put) 31
Append (org.apache.hadoop.hbase.client.Append) 25
Result (org.apache.hadoop.hbase.client.Result) 25
Delete (org.apache.hadoop.hbase.client.Delete) 21
Get (org.apache.hadoop.hbase.client.Get) 19
IOException (java.io.IOException) 16
TableName (org.apache.hadoop.hbase.TableName) 15
Table (org.apache.hadoop.hbase.client.Table) 15
ArrayList (java.util.ArrayList) 14
Cell (org.apache.hadoop.hbase.Cell) 11
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 11
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult) 9
Mutation (org.apache.hadoop.hbase.client.Mutation) 9
RowMutations (org.apache.hadoop.hbase.client.RowMutations) 9
List (java.util.List) 8
Map (java.util.Map) 8
Scan (org.apache.hadoop.hbase.client.Scan) 7
KeyValue (org.apache.hadoop.hbase.KeyValue) 5