
Example 1 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class RequestConverter method buildRegionAction.

/**
   * Create a protocol buffer multi request for a list of actions.
   * Propagates each Action's original index.
   *
   * @param regionName the region the actions are destined for
   * @param actions the client-side actions to convert
   * @return a multi request builder populated with the converted actions
   * @throws IOException if an action cannot be converted
   */
public static RegionAction.Builder buildRegionAction(final byte[] regionName, final List<Action> actions,
        final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder,
        final MutationProto.Builder mutationBuilder) throws IOException {
    ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
    for (Action action : actions) {
        Row row = action.getAction();
        actionBuilder.clear();
        actionBuilder.setIndex(action.getOriginalIndex());
        mutationBuilder.clear();
        if (row instanceof Get) {
            Get g = (Get) row;
            regionActionBuilder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
        } else if (row instanceof Put) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder)));
        } else if (row instanceof Delete) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder)));
        } else if (row instanceof Append) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation(MutationType.APPEND, (Append) row, mutationBuilder, action.getNonce())));
        } else if (row instanceof Increment) {
            regionActionBuilder.addAction(actionBuilder.setMutation(ProtobufUtil.toMutation((Increment) row, mutationBuilder, action.getNonce())));
        } else if (row instanceof RegionCoprocessorServiceExec) {
            RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
            // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
            org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value =
                    org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.unsafeWrap(
                            exec.getRequest().toByteArray());
            if (cpBuilder == null) {
                cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
            } else {
                cpBuilder.clear();
            }
            regionActionBuilder.addAction(actionBuilder.setServiceCall(cpBuilder
                    .setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
                    .setServiceName(exec.getMethod().getService().getFullName())
                    .setMethodName(exec.getMethod().getName())
                    .setRequest(value)));
        } else if (row instanceof RowMutations) {
            throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
        } else {
            throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
        }
    }
    return regionActionBuilder;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Action(org.apache.hadoop.hbase.client.Action) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Put(org.apache.hadoop.hbase.client.Put) RegionCoprocessorServiceExec(org.apache.hadoop.hbase.client.RegionCoprocessorServiceExec) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Append(org.apache.hadoop.hbase.client.Append) Get(org.apache.hadoop.hbase.client.Get) Increment(org.apache.hadoop.hbase.client.Increment) Row(org.apache.hadoop.hbase.client.Row)
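
A caller-side sketch may help: the three builders are allocated once and reused across calls, which is why they are parameters rather than locals. Everything below (the region name, the sample Put, the variable names) is a hypothetical assumption for illustration, not code from HBase.

// Hypothetical caller: reuse the builders across invocations to avoid per-call allocation.
byte[] regionName = Bytes.toBytes("testRegion");  // placeholder region name
RegionAction.Builder regionActionBuilder = RegionAction.newBuilder();
ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
MutationProto.Builder mutationBuilder = MutationProto.newBuilder();

List<Action> actions = new ArrayList<>();
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
actions.add(new Action(put, 0));  // 0 is the action's original index in the batch

RegionAction.Builder built = RequestConverter.buildRegionAction(
        regionName, actions, regionActionBuilder, actionBuilder, mutationBuilder);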

Example 2 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class TestDistributedLogSplitting method testNonceRecovery.

@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testNonceRecovery() throws Exception {
    LOG.info("testNonceRecovery");
    final String TABLE_NAME = "table";
    final String FAMILY_NAME = "family";
    final int NUM_REGIONS_TO_CREATE = 40;
    conf.setLong("hbase.regionserver.hlog.blocksize", 100 * 1024);
    conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
    startCluster(NUM_RS);
    master.balanceSwitch(false);
    final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
    Table ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE);
    NonceGeneratorWithDups ng = new NonceGeneratorWithDups();
    NonceGenerator oldNg = ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection) TEST_UTIL.getConnection(), ng);
    try {
        List<Increment> reqs = new ArrayList<>();
        for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
            HRegionServer hrs = rst.getRegionServer();
            List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
            for (HRegionInfo hri : hris) {
                if (TABLE_NAME.equalsIgnoreCase(hri.getTable().getNameAsString())) {
                    byte[] key = hri.getStartKey();
                    if (key == null || key.length == 0) {
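                        // an empty start key marks the first region; build a key inside it
                        // by decrementing the last byte of the region's end key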
                        key = Bytes.copy(hri.getEndKey());
                        --(key[key.length - 1]);
                    }
                    Increment incr = new Increment(key);
                    incr.addColumn(Bytes.toBytes(FAMILY_NAME), Bytes.toBytes("q"), 1);
                    ht.increment(incr);
                    reqs.add(incr);
                }
            }
        }
        HRegionServer hrs = findRSToKill(false, "table");
        abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
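        // switch the test nonce generator into duplicate mode so each retry below
        // replays the nonce of the original increment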
        ng.startDups();
        for (Increment incr : reqs) {
            try {
                ht.increment(incr);
                fail("should have thrown");
            } catch (OperationConflictException ope) {
                LOG.debug("Caught as expected: " + ope.getMessage());
            }
        }
    } finally {
        ConnectionUtils.injectNonceGeneratorForTesting((ClusterConnection) TEST_UTIL.getConnection(), oldNg);
        if (ht != null)
            ht.close();
        if (zkw != null)
            zkw.close();
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ArrayList(java.util.ArrayList) NonceGenerator(org.apache.hadoop.hbase.client.NonceGenerator) PerClientRandomNonceGenerator(org.apache.hadoop.hbase.client.PerClientRandomNonceGenerator) HRegionServer(org.apache.hadoop.hbase.regionserver.HRegionServer) HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) ZooKeeperWatcher(org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher) Increment(org.apache.hadoop.hbase.client.Increment) RegionServerThread(org.apache.hadoop.hbase.util.JVMClusterUtil.RegionServerThread) OperationConflictException(org.apache.hadoop.hbase.exceptions.OperationConflictException) Ignore(org.junit.Ignore) Test(org.junit.Test)
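
The contract the test pins down, in condensed form: once a nonce is recorded as applied, a replay of the same nonce is rejected. The sketch below is hypothetical and only behaves this way with a duplicate-producing nonce generator injected, as the test does with NonceGeneratorWithDups; the default per-client generator issues a fresh nonce on every call, so no conflict would occur.

// Sketch only: assumes a nonce generator that replays nonces has been injected.
Increment incr = new Increment(Bytes.toBytes("row1"));
incr.addColumn(Bytes.toBytes("family"), Bytes.toBytes("q"), 1);
table.increment(incr);          // applied; the server records the nonce
try {
    table.increment(incr);      // same nonce replayed after recovery
    fail("should have thrown");
} catch (OperationConflictException expected) {
    // the duplicate nonce was detected and the mutation was not re-applied
}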

Example 3 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class TestProtobufUtil method testIncrement.

/**
   * Test Increment to MutationProto conversions.
   *
   * @throws IOException if the conversion fails
   */
@Test
public void testIncrement() throws IOException {
    MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
    mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
    mutateBuilder.setMutateType(MutationType.INCREMENT);
    ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
    valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
    qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(11L)));
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
    qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(22L)));
    valueBuilder.addQualifierValue(qualifierBuilder.build());
    mutateBuilder.addColumnValue(valueBuilder.build());
    MutationProto proto = mutateBuilder.build();
    // default fields
    assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
    // set the default value for equal comparison
    mutateBuilder = MutationProto.newBuilder(proto);
    mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
    Increment increment = ProtobufUtil.toIncrement(proto, null);
    assertEquals(mutateBuilder.build(), ProtobufUtil.toMutation(increment, MutationProto.newBuilder(), HConstants.NO_NONCE));
}
Also used : QualifierValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) Increment(org.apache.hadoop.hbase.client.Increment) ColumnValue(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.ColumnValue) MutationProto(org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto) Test(org.junit.Test)
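
The same round trip can also start from the client object rather than the proto; a minimal sketch using only the conversion calls that appear in the test above:

// Build an Increment, serialize it to a MutationProto, then deserialize it back.
Increment increment = new Increment(Bytes.toBytes("row"));
increment.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("c1"), 11L);
MutationProto proto = ProtobufUtil.toMutation(increment, MutationProto.newBuilder(), HConstants.NO_NONCE);
Increment roundTripped = ProtobufUtil.toIncrement(proto, null);  // null: no CellScanner backing the proto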

Example 4 with Increment

use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.

the class RowKeyMerge method createBulkIncrement.

public List<Increment> createBulkIncrement(Map<RowInfo, ConcurrentCounterMap.LongAdder> data, RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
    if (data.isEmpty()) {
        return Collections.emptyList();
    }
    final Map<RowKey, List<ColumnName>> rowkeyMerge = rowKeyBaseMerge(data);
    List<Increment> incrementList = new ArrayList<>();
    for (Map.Entry<RowKey, List<ColumnName>> rowKeyEntry : rowkeyMerge.entrySet()) {
        Increment increment = createIncrement(rowKeyEntry, rowKeyDistributorByHashPrefix);
        incrementList.add(increment);
    }
    return incrementList;
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment) ConcurrentCounterMap(com.navercorp.pinpoint.collector.util.ConcurrentCounterMap)
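
A plausible consumer of the returned list sends it as one batch call; the sketch below assumes a Table handle and an initialized RowKeyMerge, neither of which comes from the Pinpoint source. Increment implements Row, so the list is accepted by Table.batch as-is.

// Hypothetical consumer: flush the merged increments in a single batch call.
List<Increment> increments = rowKeyMerge.createBulkIncrement(data, rowKeyDistributorByHashPrefix);
if (!increments.isEmpty()) {
    Object[] results = new Object[increments.size()];
    // each slot of 'results' receives a Result on success or an exception object on failure
    table.batch(increments, results);
}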

Example 5 with Increment

use of org.apache.hadoop.hbase.client.Increment in project metron by apache.

the class TupleTableConfig method getIncrementFromTuple.

/**
 * Creates an HBase {@link Increment} from a Storm {@link Tuple}.
 *
 * @param tuple
 *          The {@link Tuple}
 * @param increment
 *          The amount to increment the counter by
 * @return {@link Increment}
 */
public Increment getIncrementFromTuple(final Tuple tuple, final long increment) {
    byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));
    Increment inc = new Increment(rowKey);
    inc.setDurability(durability);
    if (columnFamilies.size() > 0) {
        for (String cf : columnFamilies.keySet()) {
            byte[] cfBytes = Bytes.toBytes(cf);
            for (String cq : columnFamilies.get(cf)) {
                byte[] val;
                try {
                    val = Bytes.toBytes(tuple.getStringByField(cq));
                } catch (IllegalArgumentException ex) {
                    // if cq isn't a tuple field, use cq for the counter instead of the tuple value
                    val = Bytes.toBytes(cq);
                }
                inc.addColumn(cfBytes, val, increment);
            }
        }
    }
    return inc;
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment)
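
The returned Increment is applied like any other client mutation. A minimal usage sketch; the config/table handles and the "cf"/"counter" names are placeholders, not Metron code.

// Apply the increment and read the updated counter from the returned Result.
Increment inc = tupleTableConfig.getIncrementFromTuple(tuple, 1L);
Result result = table.increment(inc);
long updated = Bytes.toLong(result.getValue(Bytes.toBytes("cf"), Bytes.toBytes("counter")));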

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment): 81
Test (org.junit.Test): 42
Put (org.apache.hadoop.hbase.client.Put): 31
Append (org.apache.hadoop.hbase.client.Append): 25
Result (org.apache.hadoop.hbase.client.Result): 25
Delete (org.apache.hadoop.hbase.client.Delete): 21
Get (org.apache.hadoop.hbase.client.Get): 19
IOException (java.io.IOException): 16
TableName (org.apache.hadoop.hbase.TableName): 15
Table (org.apache.hadoop.hbase.client.Table): 15
ArrayList (java.util.ArrayList): 14
Cell (org.apache.hadoop.hbase.Cell): 11
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 11
CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult): 9
Mutation (org.apache.hadoop.hbase.client.Mutation): 9
RowMutations (org.apache.hadoop.hbase.client.RowMutations): 9
List (java.util.List): 8
Map (java.util.Map): 8
Scan (org.apache.hadoop.hbase.client.Scan): 7
KeyValue (org.apache.hadoop.hbase.KeyValue): 5