Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
From class RequestConverter, method buildRegionAction:
/**
 * Create a protocol buffer multi request for a list of actions.
 * Propagates the Actions' original index.
 *
 * @param regionName the name of the region the actions apply to
 * @param actions the list of actions to encode
 * @return a multi request
 * @throws IOException
 */
public static RegionAction.Builder buildRegionAction(final byte[] regionName,
    final List<Action> actions, final RegionAction.Builder regionActionBuilder,
    final ClientProtos.Action.Builder actionBuilder,
    final MutationProto.Builder mutationBuilder) throws IOException {
  ClientProtos.CoprocessorServiceCall.Builder cpBuilder = null;
  for (Action action : actions) {
    Row row = action.getAction();
    actionBuilder.clear();
    actionBuilder.setIndex(action.getOriginalIndex());
    mutationBuilder.clear();
    if (row instanceof Get) {
      Get g = (Get) row;
      regionActionBuilder.addAction(actionBuilder.setGet(ProtobufUtil.toGet(g)));
    } else if (row instanceof Put) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation(MutationType.PUT, (Put) row, mutationBuilder)));
    } else if (row instanceof Delete) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation(MutationType.DELETE, (Delete) row, mutationBuilder)));
    } else if (row instanceof Append) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation(MutationType.APPEND, (Append) row, mutationBuilder,
              action.getNonce())));
    } else if (row instanceof Increment) {
      regionActionBuilder.addAction(actionBuilder.setMutation(
          ProtobufUtil.toMutation((Increment) row, mutationBuilder, action.getNonce())));
    } else if (row instanceof RegionCoprocessorServiceExec) {
      RegionCoprocessorServiceExec exec = (RegionCoprocessorServiceExec) row;
      // DUMB COPY!!! FIX!!! Done to copy from c.g.p.ByteString to shaded ByteString.
      org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString value =
          org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations
              .unsafeWrap(exec.getRequest().toByteArray());
      if (cpBuilder == null) {
        cpBuilder = ClientProtos.CoprocessorServiceCall.newBuilder();
      } else {
        cpBuilder.clear();
      }
      regionActionBuilder.addAction(actionBuilder.setServiceCall(
          cpBuilder.setRow(UnsafeByteOperations.unsafeWrap(exec.getRow()))
              .setServiceName(exec.getMethod().getService().getFullName())
              .setMethodName(exec.getMethod().getName())
              .setRequest(value)));
    } else if (row instanceof RowMutations) {
      throw new UnsupportedOperationException("No RowMutations in multi calls; use mutateRow");
    } else {
      throw new DoNotRetryIOException("Multi doesn't support " + row.getClass().getName());
    }
  }
  return regionActionBuilder;
}
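Note that the regionName parameter is not consumed in the body above, so the region specifier is presumably set on regionActionBuilder before the call. A minimal, hypothetical caller might look like the following sketch; the regionName and actions variables are assumptions for illustration, not taken from the HBase source:

// Hypothetical caller: the three builders are allocated once and reused per region.
RegionAction.Builder built = RequestConverter.buildRegionAction(
    regionName,                      // byte[] name of the target region (assumed)
    actions,                         // List<Action> destined for that region (assumed)
    RegionAction.newBuilder(),
    ClientProtos.Action.newBuilder(),
    MutationProto.newBuilder());
MultiRequest multiRequest = MultiRequest.newBuilder()
    .addRegionAction(built.build())
    .build();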
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
From class TestDistributedLogSplitting, method testNonceRecovery:
@Ignore("DLR is broken by HBASE-12751")
@Test(timeout = 300000)
public void testNonceRecovery() throws Exception {
  LOG.info("testNonceRecovery");
  final String TABLE_NAME = "table";
  final String FAMILY_NAME = "family";
  final int NUM_REGIONS_TO_CREATE = 40;
  conf.setLong("hbase.regionserver.hlog.blocksize", 100 * 1024);
  conf.setBoolean(HConstants.DISTRIBUTED_LOG_REPLAY_KEY, true);
  startCluster(NUM_RS);
  master.balanceSwitch(false);
  final ZooKeeperWatcher zkw = new ZooKeeperWatcher(conf, "table-creation", null);
  Table ht = installTable(zkw, TABLE_NAME, FAMILY_NAME, NUM_REGIONS_TO_CREATE);
  NonceGeneratorWithDups ng = new NonceGeneratorWithDups();
  NonceGenerator oldNg = ConnectionUtils.injectNonceGeneratorForTesting(
      (ClusterConnection) TEST_UTIL.getConnection(), ng);
  try {
    List<Increment> reqs = new ArrayList<>();
    for (RegionServerThread rst : cluster.getLiveRegionServerThreads()) {
      HRegionServer hrs = rst.getRegionServer();
      List<HRegionInfo> hris = ProtobufUtil.getOnlineRegions(hrs.getRSRpcServices());
      for (HRegionInfo hri : hris) {
        if (TABLE_NAME.equalsIgnoreCase(hri.getTable().getNameAsString())) {
          byte[] key = hri.getStartKey();
          if (key == null || key.length == 0) {
            key = Bytes.copy(hri.getEndKey());
            --(key[key.length - 1]);
          }
          Increment incr = new Increment(key);
          incr.addColumn(Bytes.toBytes(FAMILY_NAME), Bytes.toBytes("q"), 1);
          ht.increment(incr);
          reqs.add(incr);
        }
      }
    }
    HRegionServer hrs = findRSToKill(false, "table");
    abortRSAndWaitForRecovery(hrs, zkw, NUM_REGIONS_TO_CREATE);
    ng.startDups();
    for (Increment incr : reqs) {
      try {
        ht.increment(incr);
        fail("should have thrown");
      } catch (OperationConflictException ope) {
        LOG.debug("Caught as expected: " + ope.getMessage());
      }
    }
  } finally {
    ConnectionUtils.injectNonceGeneratorForTesting(
        (ClusterConnection) TEST_UTIL.getConnection(), oldNg);
    if (ht != null) ht.close();
    if (zkw != null) zkw.close();
  }
}
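The duplicate-nonce behavior comes from NonceGeneratorWithDups, which is not shown here: it records every nonce it hands out and, once startDups() is called, replays them so the retried increments collide with the recovered server state. A rough sketch of such a helper, assuming the two-method NonceGenerator interface (getNonceGroup/newNonce); the real class lives in the HBase test sources and may differ:

// Sketch only: replays previously issued nonces after startDups().
static class NonceGeneratorWithDups implements NonceGenerator {
  private final Random rand = new Random();
  private final long group = rand.nextLong();  // assumed per-client group id
  private final Queue<Long> issued = new LinkedList<>();
  private volatile boolean dups = false;

  void startDups() {
    dups = true;
  }

  @Override
  public long getNonceGroup() {
    return group;
  }

  @Override
  public synchronized long newNonce() {
    if (dups) {
      return issued.remove();  // duplicate: the server should reject the retried op
    }
    long nonce = rand.nextLong();  // the real helper also avoids HConstants.NO_NONCE
    issued.add(nonce);
    return nonce;
  }
}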
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
From class TestProtobufUtil, method testIncrement:
/**
 * Test Increment-to-MutationProto conversions.
 *
 * @throws IOException
 */
@Test
public void testIncrement() throws IOException {
  MutationProto.Builder mutateBuilder = MutationProto.newBuilder();
  mutateBuilder.setRow(ByteString.copyFromUtf8("row"));
  mutateBuilder.setMutateType(MutationType.INCREMENT);
  ColumnValue.Builder valueBuilder = ColumnValue.newBuilder();
  valueBuilder.setFamily(ByteString.copyFromUtf8("f1"));
  QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c1"));
  qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(11L)));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  qualifierBuilder.setQualifier(ByteString.copyFromUtf8("c2"));
  qualifierBuilder.setValue(ByteString.copyFrom(Bytes.toBytes(22L)));
  valueBuilder.addQualifierValue(qualifierBuilder.build());
  mutateBuilder.addColumnValue(valueBuilder.build());
  MutationProto proto = mutateBuilder.build();
  // default fields
  assertEquals(MutationProto.Durability.USE_DEFAULT, proto.getDurability());
  // set the default value for equal comparison
  mutateBuilder = MutationProto.newBuilder(proto);
  mutateBuilder.setDurability(MutationProto.Durability.USE_DEFAULT);
  Increment increment = ProtobufUtil.toIncrement(proto, null);
  assertEquals(mutateBuilder.build(),
      ProtobufUtil.toMutation(increment, MutationProto.newBuilder(), HConstants.NO_NONCE));
}
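The same conversion can be exercised from the client side by building the Increment directly instead of decoding it from a proto; a short hedged sketch using only calls that appear elsewhere in this section:

// Illustrative: a client-side Increment converted to its protobuf form.
Increment inc = new Increment(Bytes.toBytes("row"));
inc.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("c1"), 11L);
inc.addColumn(Bytes.toBytes("f1"), Bytes.toBytes("c2"), 22L);
MutationProto proto =
    ProtobufUtil.toMutation(inc, MutationProto.newBuilder(), HConstants.NO_NONCE);
assertEquals(MutationType.INCREMENT, proto.getMutateType());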
Use of org.apache.hadoop.hbase.client.Increment in project pinpoint by naver.
From class RowKeyMerge, method createBulkIncrement:
public List<Increment> createBulkIncrement(Map<RowInfo, ConcurrentCounterMap.LongAdder> data,
    RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
  if (data.isEmpty()) {
    return Collections.emptyList();
  }
  final Map<RowKey, List<ColumnName>> rowkeyMerge = rowKeyBaseMerge(data);
  List<Increment> incrementList = new ArrayList<>();
  for (Map.Entry<RowKey, List<ColumnName>> rowKeyEntry : rowkeyMerge.entrySet()) {
    Increment increment = createIncrement(rowKeyEntry, rowKeyDistributorByHashPrefix);
    incrementList.add(increment);
  }
  return incrementList;
}
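The createIncrement helper is not shown above. A plausible shape, hedged: distribute the row key with the supplied RowKeyDistributorByHashPrefix and add one counter column per merged ColumnName. The family field and the ColumnName accessors are assumptions based on how the merge output is used:

// Sketch of the helper referenced above; accessor and field names are assumptions.
private Increment createIncrement(Map.Entry<RowKey, List<ColumnName>> rowKeyEntry,
    RowKeyDistributorByHashPrefix rowKeyDistributorByHashPrefix) {
  byte[] distributedKey =
      rowKeyDistributorByHashPrefix.getDistributedKey(rowKeyEntry.getKey().getRowKey());
  Increment increment = new Increment(distributedKey);
  for (ColumnName columnName : rowKeyEntry.getValue()) {
    // one counter column per merged ColumnName
    increment.addColumn(family, columnName.getColumnName(), columnName.getCallCount());
  }
  return increment;
}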
Use of org.apache.hadoop.hbase.client.Increment in project metron by apache.
From class TupleTableConfig, method getIncrementFromTuple:
/**
 * Creates an HBase {@link Increment} from a Storm {@link Tuple}.
 *
 * @param tuple
 *          The {@link Tuple}
 * @param increment
 *          The amount to increment the counter by
 * @return {@link Increment}
 */
public Increment getIncrementFromTuple(final Tuple tuple, final long increment) {
  byte[] rowKey = Bytes.toBytes(tuple.getStringByField(tupleRowKeyField));
  Increment inc = new Increment(rowKey);
  inc.setDurability(durability);
  if (columnFamilies.size() > 0) {
    for (String cf : columnFamilies.keySet()) {
      byte[] cfBytes = Bytes.toBytes(cf);
      for (String cq : columnFamilies.get(cf)) {
        byte[] val;
        try {
          val = Bytes.toBytes(tuple.getStringByField(cq));
        } catch (IllegalArgumentException ex) {
          // if cq isn't a tuple field, use cq itself as the counter qualifier
          val = Bytes.toBytes(cq);
        }
        inc.addColumn(cfBytes, val, increment);
      }
    }
  }
  return inc;
}
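A hedged example of wiring this into a Storm bolt's execute() method; the conf, table, and collector fields are assumptions for illustration, not part of the metron source shown above:

// Illustrative bolt usage; the surrounding fields are assumed to be set up in prepare().
@Override
public void execute(Tuple tuple) {
  Increment inc = conf.getIncrementFromTuple(tuple, 1L);  // bump each counter by 1
  try {
    table.increment(inc);
    collector.ack(tuple);
  } catch (IOException e) {
    collector.fail(tuple);
  }
}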