Search in sources :

Example 56 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class TestPostIncrementAndAppendBeforeWAL method testChangeCellWithDifferntColumnFamily.

@Test
public void testChangeCellWithDifferntColumnFamily() throws Exception {
    // Install a coprocessor that rewrites cells from CF1 into CF2 before they
    // reach the WAL, then verify both Increment and Append land in CF2.
    TableName tn = TableName.valueOf(name.getMethodName());
    createTableWithCoprocessor(tn, ChangeCellWithDifferntColumnFamilyObserver.class.getName());
    try (Table table = connection.getTable(tn)) {
        // Increment CF1:CQ1 by 1; the observer should move the cell to CF2.
        table.increment(new Increment(ROW).addColumn(CF1_BYTES, CQ1, 1));
        Result incResult = table.get(new Get(ROW).addColumn(CF2_BYTES, CQ1));
        assertEquals(1, incResult.size());
        assertEquals(1, Bytes.toLong(incResult.getValue(CF2_BYTES, CQ1)));
        // Append VALUE to CF1:CQ2; the observer should likewise move it to CF2.
        table.append(new Append(ROW).addColumn(CF1_BYTES, CQ2, VALUE));
        Result appResult = table.get(new Get(ROW).addColumn(CF2_BYTES, CQ2));
        assertEquals(1, appResult.size());
        assertTrue(Bytes.equals(VALUE, appResult.getValue(CF2_BYTES, CQ2)));
    }
}
Also used : TableName(org.apache.hadoop.hbase.TableName) Table(org.apache.hadoop.hbase.client.Table) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Get(org.apache.hadoop.hbase.client.Get) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 57 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class TestIncrementTimeRange method checkHTableInterfaceMethods.

/**
 * Exercises the Table interface's increment and batch methods with explicit
 * time ranges, verifying that the coprocessor (MyObserver) observes the exact
 * TimeRange supplied by the client and that the incremented values accumulate
 * as expected.
 *
 * @throws Exception if any table operation fails
 */
private void checkHTableInterfaceMethods() throws Exception {
    long time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    // Seed the counter cell with 1.
    hTableInterface.put(new Put(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, Bytes.toBytes(1L)));
    checkRowValue(ROW_A, Bytes.toBytes(1L));
    time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    // Single increment with an explicit time range; observer must see the same range.
    TimeRange range10 = TimeRange.between(1, time + 10);
    hTableInterface.increment(new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 10L).setTimeRange(range10.getMin(), range10.getMax()));
    checkRowValue(ROW_A, Bytes.toBytes(11L));
    assertEquals(MyObserver.tr10.getMin(), range10.getMin());
    assertEquals(MyObserver.tr10.getMax(), range10.getMax());
    time = EnvironmentEdgeManager.currentTime();
    mee.setValue(time);
    // Two increments submitted via batch(), each carrying the same time range.
    TimeRange range2 = TimeRange.between(1, time + 20);
    List<Row> actions = Arrays.asList(new Row[] { new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L).setTimeRange(range2.getMin(), range2.getMax()), new Increment(ROW_A).addColumn(TEST_FAMILY, qualifierCol1, 2L).setTimeRange(range2.getMin(), range2.getMax()) });
    // One result slot per action; batch() fills this array in place.
    Object[] results = new Object[actions.size()];
    hTableInterface.batch(actions, results);
    assertEquals(MyObserver.tr2.getMin(), range2.getMin());
    assertEquals(MyObserver.tr2.getMax(), range2.getMax());
    for (Object result : results) {
        assertTrue(result instanceof Result);
    }
    // 1 (put) + 10 + 2 + 2 = 15.
    checkRowValue(ROW_A, Bytes.toBytes(15L));
    hTableInterface.close();
}
Also used : TimeRange(org.apache.hadoop.hbase.io.TimeRange) Increment(org.apache.hadoop.hbase.client.Increment) Row(org.apache.hadoop.hbase.client.Row) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result)

Example 58 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class AccessController method preBatchMutate.

@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    // Cell-level ACL checks only run when cell features are enabled and early
    // termination compatibility mode is off; otherwise this hook is a no-op.
    if (!cellFeaturesEnabled || compatibleEarlyTermination) {
        return;
    }
    TableName table = c.getEnvironment().getRegion().getRegionInfo().getTable();
    User user = getActiveUser(c);
    for (int i = 0; i < miniBatchOp.size(); i++) {
        Mutation m = miniBatchOp.getOperation(i);
        // Only mutations flagged earlier (table/cf/qualifier perm check failed)
        // get a second chance here via cell-level permission checks.
        if (m.getAttribute(CHECK_COVERING_PERM) == null) {
            continue;
        }
        OpType opType;
        long timestamp;
        if (m instanceof Put) {
            checkForReservedTagPresence(user, m);
            opType = OpType.PUT;
            timestamp = m.getTimestamp();
        } else if (m instanceof Delete) {
            opType = OpType.DELETE;
            timestamp = m.getTimestamp();
        } else if (m instanceof Increment) {
            opType = OpType.INCREMENT;
            timestamp = ((Increment) m).getTimeRange().getMax();
        } else if (m instanceof Append) {
            opType = OpType.APPEND;
            timestamp = ((Append) m).getTimeRange().getMax();
        } else {
            // Unknown mutation type: nothing to authorize.
            continue;
        }
        boolean covered = checkCoveringPermission(user, opType, c.getEnvironment(), m.getRow(), m.getFamilyCellMap(), timestamp, Action.WRITE);
        AuthResult authResult = covered
            ? AuthResult.allow(opType.toString(), "Covering cell set", user, Action.WRITE, table, m.getFamilyCellMap())
            : AuthResult.deny(opType.toString(), "Covering cell set", user, Action.WRITE, table, m.getFamilyCellMap());
        AccessChecker.logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) Put(org.apache.hadoop.hbase.client.Put)

Example 59 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class RSRpcServices method checkAndMutate.

/**
 * Converts a list of protobuf actions into client mutations and executes them
 * as a single atomic CheckAndMutate against the region, honoring coprocessor
 * pre/post hooks. Only Put/Delete/Increment/Append mutations are accepted;
 * a Get action is rejected with DoNotRetryIOException.
 *
 * @param region the region to mutate
 * @param actions the protobuf actions to convert and apply
 * @param cellScanner scanner positioned over the cells backing the actions
 * @param condition the check condition guarding the mutations
 * @param nonceGroup nonce group for idempotency of Increment/Append
 * @param spaceQuotaEnforcement active space-quota policies to check against
 * @return the result of the check-and-mutate operation
 * @throws IOException on conversion failure, quota violation, or region error
 */
private CheckAndMutateResult checkAndMutate(HRegion region, List<ClientProtos.Action> actions, CellScanner cellScanner, Condition condition, long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
    // Count of actions fully converted so far; used in the finally block to
    // know which actions' cells still need to be skipped on the scanner.
    int countOfCompleteMutation = 0;
    try {
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        List<Mutation> mutations = new ArrayList<>();
        long nonce = HConstants.NO_NONCE;
        for (ClientProtos.Action action : actions) {
            if (action.hasGet()) {
                throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
            }
            MutationProto mutation = action.getMutation();
            MutationType type = mutation.getMutateType();
            switch(type) {
                case PUT:
                    Put put = ProtobufUtil.toPut(mutation, cellScanner);
                    // Increment the counter immediately after conversion so the
                    // finally block skips only cells of unconverted actions.
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, put);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
                    mutations.add(put);
                    break;
                case DELETE:
                    Delete del = ProtobufUtil.toDelete(mutation, cellScanner);
                    ++countOfCompleteMutation;
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(del);
                    mutations.add(del);
                    break;
                case INCREMENT:
                    Increment increment = ProtobufUtil.toIncrement(mutation, cellScanner);
                    // Increments carry a nonce for idempotent retry handling.
                    nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, increment);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(increment);
                    mutations.add(increment);
                    break;
                case APPEND:
                    Append append = ProtobufUtil.toAppend(mutation, cellScanner);
                    // Appends also carry a nonce for idempotent retry handling.
                    nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, append);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(append);
                    mutations.add(append);
                    break;
                default:
                    throw new DoNotRetryIOException("invalid mutation type : " + type);
            }
        }
        if (mutations.size() == 0) {
            // Nothing to mutate: report success with no result.
            return new CheckAndMutateResult(true, null);
        } else {
            CheckAndMutate checkAndMutate = ProtobufUtil.toCheckAndMutate(condition, mutations);
            CheckAndMutateResult result = null;
            if (region.getCoprocessorHost() != null) {
                // A coprocessor may short-circuit the operation entirely.
                result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
            }
            if (result == null) {
                result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
                if (region.getCoprocessorHost() != null) {
                    result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
                }
            }
            return result;
        }
    } finally {
        // Skip the cells of any actions that were never converted, so the
        // shared cellScanner stays correctly positioned for later requests —
        // this must run even if an exception aborted conversion midway.
        for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
            skipCellsForMutation(actions.get(i), cellScanner);
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) ArrayList(java.util.ArrayList) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) CheckAndMutate(org.apache.hadoop.hbase.client.CheckAndMutate) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) Put(org.apache.hadoop.hbase.client.Put) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)

Example 60 with Increment

use of org.apache.hadoop.hbase.client.Increment in project hbase by apache.

the class RSRpcServices method increment.

/**
 * Executes a single increment mutation against the given region, enforcing
 * cell-size limits and space quotas, recording the mutation against the
 * operation quota, and reporting latency to the region server metrics.
 */
private Result increment(final HRegion region, final OperationQuota quota, final MutationProto mutation, final CellScanner cells, long nonceGroup, ActivePolicyEnforcement spaceQuota) throws IOException {
    final long startTime = EnvironmentEdgeManager.currentTime();
    final Increment increment = ProtobufUtil.toIncrement(mutation, cells);
    checkCellSizeLimit(region, increment);
    spaceQuota.getPolicyEnforcement(region).check(increment);
    quota.addMutation(increment);
    // Nonce supports idempotent retries of this increment.
    final long nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
    final Result result = region.increment(increment, nonceGroup, nonce);
    final MetricsRegionServer metricsRegionServer = server.getMetrics();
    if (metricsRegionServer != null) {
        final long elapsed = EnvironmentEdgeManager.currentTime() - startTime;
        metricsRegionServer.updateIncrement(region.getTableDescriptor().getTableName(), elapsed);
    }
    // Never return null to the RPC layer; normalize to the empty result.
    if (result == null) {
        return Result.EMPTY_RESULT;
    }
    return result;
}
Also used : Increment(org.apache.hadoop.hbase.client.Increment) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult)

Aggregations

Increment (org.apache.hadoop.hbase.client.Increment)81 Test (org.junit.Test)42 Put (org.apache.hadoop.hbase.client.Put)31 Append (org.apache.hadoop.hbase.client.Append)25 Result (org.apache.hadoop.hbase.client.Result)25 Delete (org.apache.hadoop.hbase.client.Delete)21 Get (org.apache.hadoop.hbase.client.Get)19 IOException (java.io.IOException)16 TableName (org.apache.hadoop.hbase.TableName)15 Table (org.apache.hadoop.hbase.client.Table)15 ArrayList (java.util.ArrayList)14 Cell (org.apache.hadoop.hbase.Cell)11 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)11 CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)9 Mutation (org.apache.hadoop.hbase.client.Mutation)9 RowMutations (org.apache.hadoop.hbase.client.RowMutations)9 List (java.util.List)8 Map (java.util.Map)8 Scan (org.apache.hadoop.hbase.client.Scan)7 KeyValue (org.apache.hadoop.hbase.KeyValue)5