Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class TestPostIncrementAndAppendBeforeWAL, method testChangeCellWithDifferntColumnFamily.
@Test
public void testChangeCellWithDifferntColumnFamily() throws Exception {
  TableName tableName = TableName.valueOf(name.getMethodName());
  createTableWithCoprocessor(tableName,
    ChangeCellWithDifferntColumnFamilyObserver.class.getName());
  try (Table table = connection.getTable(tableName)) {
    // Write an increment against CF1; the observer rewrites the cell into CF2.
    Increment increment = new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1);
    table.increment(increment);
    Get get = new Get(ROW).addColumn(CF2_BYTES, CQ1);
    Result result = table.get(get);
    assertEquals(1, result.size());
    assertEquals(1, Bytes.toLong(result.getValue(CF2_BYTES, CQ1)));
    // Same for an append: written against CF1, read back from CF2.
    Append append = new Append(ROW).addColumn(CF1_BYTES, CQ2, VALUE);
    table.append(append);
    get = new Get(ROW).addColumn(CF2_BYTES, CQ2);
    result = table.get(get);
    assertEquals(1, result.size());
    assertTrue(Bytes.equals(VALUE, result.getValue(CF2_BYTES, CQ2)));
  }
}
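The interesting piece is the coprocessor itself: it intercepts the cells produced by the increment and append before they reach the WAL and rebuilds them under CF2. A minimal sketch of what such an observer could look like, assuming the HBase 2.x RegionObserver postIncrementBeforeWAL/postAppendBeforeWAL hooks and the cell-builder API; the rewrite/moveToCf2 helper names are our own:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellBuilderFactory;
import org.apache.hadoop.hbase.CellBuilderType;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.coprocessor.ObserverContext;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessor;
import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
import org.apache.hadoop.hbase.coprocessor.RegionObserver;
import org.apache.hadoop.hbase.util.Pair;

public static class ChangeCellWithDifferntColumnFamilyObserver
    implements RegionCoprocessor, RegionObserver {

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public List<Pair<Cell, Cell>> postIncrementBeforeWAL(
      ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
      List<Pair<Cell, Cell>> cellPairs) throws IOException {
    return rewrite(cellPairs);
  }

  @Override
  public List<Pair<Cell, Cell>> postAppendBeforeWAL(
      ObserverContext<RegionCoprocessorEnvironment> ctx, Mutation mutation,
      List<Pair<Cell, Cell>> cellPairs) throws IOException {
    return rewrite(cellPairs);
  }

  // Replace the second element of each pair (the cell about to be written)
  // with a copy that carries CF2 instead of its original family.
  private List<Pair<Cell, Cell>> rewrite(List<Pair<Cell, Cell>> cellPairs) {
    List<Pair<Cell, Cell>> result = new ArrayList<>(cellPairs.size());
    for (Pair<Cell, Cell> pair : cellPairs) {
      result.add(new Pair<>(pair.getFirst(), moveToCf2(pair.getSecond())));
    }
    return result;
  }

  private Cell moveToCf2(Cell cell) {
    return CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY)
      .setRow(CellUtil.cloneRow(cell))
      .setFamily(CF2_BYTES) // the only change: swap the column family
      .setQualifier(CellUtil.cloneQualifier(cell))
      .setTimestamp(cell.getTimestamp())
      .setType(Cell.Type.Put)
      .setValue(CellUtil.cloneValue(cell))
      .build();
  }
}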
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class TestIncrementTimeRange, method checkHTableInterfaceMethods.
private void checkHTableInterfaceMethods() throws Exception {
  long time = EnvironmentEdgeManager.currentTime();
  mee.setValue(time);
  hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
  checkRowValue(ROW_A, Bytes.toBytes(1L));

  // Single increment carrying an explicit TimeRange; the observer records it.
  time = EnvironmentEdgeManager.currentTime();
  mee.setValue(time);
  TimeRange range10 = TimeRange.between(1, time + 10);
  hTableInterface.increment(new Increment(ROW_A)
    .addColumn(TEST_FAMILY, qualifierCol1, 10L)
    .setTimeRange(range10.getMin(), range10.getMax()));
  checkRowValue(ROW_A, Bytes.toBytes(11L));
  assertEquals(MyObserver.tr10.getMin(), range10.getMin());
  assertEquals(MyObserver.tr10.getMax(), range10.getMax());

  // Batched increments: the TimeRange must survive the batch path as well.
  time = EnvironmentEdgeManager.currentTime();
  mee.setValue(time);
  TimeRange range2 = TimeRange.between(1, time + 20);
  List<Row> actions = Arrays.asList(new Row[] {
    new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
      .setTimeRange(range2.getMin(), range2.getMax()),
    new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L)
      .setTimeRange(range2.getMin(), range2.getMax()) });
  Object[] results = new Object[actions.size()];
  hTableInterface.batch(actions, results);
  assertEquals(MyObserver.tr2.getMin(), range2.getMin());
  assertEquals(MyObserver.tr2.getMax(), range2.getMax());
  for (Object r : results) {
    assertTrue(r instanceof Result);
  }
  checkRowValue(ROW_A, Bytes.toBytes(15L));
  hTableInterface.close();
}
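The assertions above depend on a MyObserver coprocessor that records the TimeRange each Increment carried when it reached the region. A hedged sketch of how such an observer could capture tr10 and tr2, assuming it distinguishes the two calls by the increment amount (the real test's dispatch logic may differ):

public static class MyObserver implements RegionCoprocessor, RegionObserver {
  static TimeRange tr10 = null;
  static TimeRange tr2 = null;

  @Override
  public Optional<RegionObserver> getRegionObserver() {
    return Optional.of(this);
  }

  @Override
  public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c,
      Increment increment) throws IOException {
    for (List<Cell> cells : increment.getFamilyCellMap().values()) {
      for (Cell cell : cells) {
        long amount = Bytes.toLong(cell.getValueArray(), cell.getValueOffset(),
          cell.getValueLength());
        if (amount == 10L) {
          tr10 = increment.getTimeRange();
        } else if (amount == 2L) {
          tr2 = increment.getTimeRange();
        }
      }
    }
    // Returning null lets the normal increment path run.
    return null;
  }
}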
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class AccessController, method preBatchMutate.
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
    MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
  if (cellFeaturesEnabled && !compatibleEarlyTermination) {
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    User user = getActiveUser(c);
    for (int i = 0; i < miniBatchOp.size(); i++) {
      Mutation m = miniBatchOp.getOperation(i);
      if (m.getAttribute(CHECK_COVERING_PERM) != null) {
        // The table, CF, and qualifier permission checks failed earlier; now
        // give the cell-level permission check a chance.
        OpType opType;
        long timestamp;
        if (m instanceof Put) {
          checkForReservedTagPresence(user, m);
          opType = OpType.PUT;
          timestamp = m.getTimestamp();
        } else if (m instanceof Delete) {
          opType = OpType.DELETE;
          timestamp = m.getTimestamp();
        } else if (m instanceof Increment) {
          opType = OpType.INCREMENT;
          timestamp = ((Increment) m).getTimeRange().getMax();
        } else if (m instanceof Append) {
          opType = OpType.APPEND;
          timestamp = ((Append) m).getTimeRange().getMax();
        } else {
          // If the operation type is not Put/Delete/Increment/Append, do nothing
          continue;
        }
        AuthResult authResult = null;
        if (checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(),
            m.getFamilyCellMap(), timestamp, Action.WRITE)) {
          authResult = AuthResult.allow(opType.toString(), "Covering cell set",
            user, Action.WRITE, table, m.getFamilyCellMap());
        } else {
          authResult = AuthResult.deny(opType.toString(), "Covering cell set",
            user, Action.WRITE, table, m.getFamilyCellMap());
        }
        AccessChecker.logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
          throw new AccessDeniedException("Insufficient permissions "
            + authResult.toContextString());
        }
      }
    }
  }
}
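For context, the covering-cell check above only matters when cell-level ACLs are in play. A hedged client-side illustration of how such an ACL could be attached at write time; the user, row, family, and qualifier names are hypothetical, and Permission's varargs constructor is assumed available:

// Grant a cell-level WRITE ACL when writing the counter cell.
Put put = new Put(Bytes.toBytes("row1"));
put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(0L));
put.setACL("limited_user", new Permission(Permission.Action.WRITE));
table.put(put);

// An Increment from "limited_user" over that cell may then be allowed by the
// covering-cell check in preBatchMutate even though the user failed the
// table/CF/qualifier permission checks.
table.increment(new Increment(Bytes.toBytes("row1"))
  .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L));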
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class RSRpcServices, method checkAndMutate.
private CheckAndMutateResult checkAndMutate(HRegion region,
    List<ClientProtos.Action> actions, CellScanner cellScanner, Condition condition,
    long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
  int countOfCompleteMutation = 0;
  try {
    if (!region.getRegionInfo().isMetaRegion()) {
      server.getMemStoreFlusher().reclaimMemStoreMemory();
    }
    List<Mutation> mutations = new ArrayList<>();
    long nonce = HConstants.NO_NONCE;
    for (ClientProtos.Action action : actions) {
      if (action.hasGet()) {
        throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get="
          + action.getGet());
      }
      MutationProto mutation = action.getMutation();
      MutationType type = mutation.getMutateType();
      switch (type) {
        case PUT:
          Put put = ProtobufUtil.toPut(mutation, cellScanner);
          ++countOfCompleteMutation;
          checkCellSizeLimit(region, put);
          spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
          mutations.add(put);
          break;
        case DELETE:
          Delete del = ProtobufUtil.toDelete(mutation, cellScanner);
          ++countOfCompleteMutation;
          spaceQuotaEnforcement.getPolicyEnforcement(region).check(del);
          mutations.add(del);
          break;
        case INCREMENT:
          Increment increment = ProtobufUtil.toIncrement(mutation, cellScanner);
          nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
          ++countOfCompleteMutation;
          checkCellSizeLimit(region, increment);
          spaceQuotaEnforcement.getPolicyEnforcement(region).check(increment);
          mutations.add(increment);
          break;
        case APPEND:
          Append append = ProtobufUtil.toAppend(mutation, cellScanner);
          nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
          ++countOfCompleteMutation;
          checkCellSizeLimit(region, append);
          spaceQuotaEnforcement.getPolicyEnforcement(region).check(append);
          mutations.add(append);
          break;
        default:
          throw new DoNotRetryIOException("invalid mutation type : " + type);
      }
    }
    if (mutations.isEmpty()) {
      return new CheckAndMutateResult(true, null);
    } else {
      CheckAndMutate checkAndMutate = ProtobufUtil.toCheckAndMutate(condition, mutations);
      CheckAndMutateResult result = null;
      if (region.getCoprocessorHost() != null) {
        result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
      }
      if (result == null) {
        result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
        if (region.getCoprocessorHost() != null) {
          result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
        }
      }
      return result;
    }
  } finally {
    // Advance the CellScanner past the cells of any actions that were not fully
    // deserialized, so it stays aligned for subsequent operations even if the
    // malformed cells are not skipped.
    for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
      skipCellsForMutation(actions.get(i), cellScanner);
    }
  }
}
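A hedged client-side counterpart (row, family, and qualifier names are hypothetical): a CheckAndMutate whose action is an Increment, which this server method deserializes via ProtobufUtil.toIncrement and executes atomically with the condition:

byte[] row = Bytes.toBytes("row1");
byte[] cf = Bytes.toBytes("f");
CheckAndMutate checkAndMutate = CheckAndMutate.newBuilder(row)
  .ifEquals(cf, Bytes.toBytes("status"), Bytes.toBytes("active"))
  .build(new Increment(row).addColumn(cf, Bytes.toBytes("counter"), 1L));
CheckAndMutateResult result = table.checkAndMutate(checkAndMutate);
if (result.isSuccess()) {
  // The increment was applied atomically with the condition check;
  // result.getResult() carries the incremented cell.
}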
Use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.
The class RSRpcServices, method increment.
/**
 * Execute an increment mutation.
 */
private Result increment(final HRegion region, final OperationQuota quota,
    final MutationProto mutation, final CellScanner cells, long nonceGroup,
    ActivePolicyEnforcement spaceQuota) throws IOException {
  long before = EnvironmentEdgeManager.currentTime();
  Increment increment = ProtobufUtil.toIncrement(mutation, cells);
  checkCellSizeLimit(region, increment);
  spaceQuota.getPolicyEnforcement(region).check(increment);
  quota.addMutation(increment);
  long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
  Result r = region.increment(increment, nonceGroup, nonce);
  final MetricsRegionServer metricsRegionServer = server.getMetrics();
  if (metricsRegionServer != null) {
    metricsRegionServer.updateIncrement(region.getTableDescriptor().getTableName(),
      EnvironmentEdgeManager.currentTime() - before);
  }
  return r == null ? Result.EMPTY_RESULT : r;
}
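For completeness, the client calls that end up in this RPC path; a minimal sketch with hypothetical table and column names:

try (Table table = connection.getTable(TableName.valueOf("counters"))) {
  // Convenience form for a single counter cell; returns the new value.
  long newValue = table.incrementColumnValue(Bytes.toBytes("row1"),
    Bytes.toBytes("f"), Bytes.toBytes("q"), 1L);
  // Explicit Increment form; supports multiple columns and a TimeRange.
  Result r = table.increment(new Increment(Bytes.toBytes("row1"))
    .addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), 1L));
}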