Search in sources:

Example 41 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In the class AccessController, method preBatchMutate:

/**
 * Batch-mutation pre-hook: for each mutation flagged with CHECK_COVERING_PERM
 * (meaning the earlier table/CF/qualifier permission checks failed), performs
 * a cell-level WRITE permission check and throws AccessDeniedException when
 * authorization is enabled and the user lacks a covering cell ACL.
 */
@Override
public void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c, MiniBatchOperationInProgress<Mutation> miniBatchOp) throws IOException {
    // Cell-level rechecks apply only when cell features are on and we are not
    // running in compatible-early-termination mode.
    if (!cellFeaturesEnabled || compatibleEarlyTermination) {
        return;
    }
    TableName tableName = c.getEnvironment().getRegion().getRegionInfo().getTable();
    User activeUser = getActiveUser(c);
    for (int idx = 0; idx < miniBatchOp.size(); idx++) {
        Mutation mutation = miniBatchOp.getOperation(idx);
        if (mutation.getAttribute(CHECK_COVERING_PERM) == null) {
            continue;
        }
        // We have a failure with table, cf and q perm checks and now giving a
        // chance for cell perm check.
        OpType op;
        long ts;
        if (mutation instanceof Put) {
            checkForReservedTagPresence(activeUser, mutation);
            op = OpType.PUT;
            ts = mutation.getTimestamp();
        } else if (mutation instanceof Delete) {
            op = OpType.DELETE;
            ts = mutation.getTimestamp();
        } else if (mutation instanceof Increment) {
            op = OpType.INCREMENT;
            ts = ((Increment) mutation).getTimeRange().getMax();
        } else if (mutation instanceof Append) {
            op = OpType.APPEND;
            ts = ((Append) mutation).getTimeRange().getMax();
        } else {
            // Not a Put/Delete/Increment/Append: nothing to recheck.
            continue;
        }
        boolean covered = checkCoveringPermission(activeUser, op, c.getEnvironment(), mutation.getRow(), mutation.getFamilyCellMap(), ts, Action.WRITE);
        AuthResult authResult = covered
            ? AuthResult.allow(op.toString(), "Covering cell set", activeUser, Action.WRITE, tableName, mutation.getFamilyCellMap())
            : AuthResult.deny(op.toString(), "Covering cell set", activeUser, Action.WRITE, tableName, mutation.getFamilyCellMap());
        AccessChecker.logResult(authResult);
        if (authorizationEnabled && !authResult.isAllowed()) {
            throw new AccessDeniedException("Insufficient permissions " + authResult.toContextString());
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) TableName(org.apache.hadoop.hbase.TableName) AccessDeniedException(org.apache.hadoop.hbase.security.AccessDeniedException) User(org.apache.hadoop.hbase.security.User) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) ReplicationEndpoint(org.apache.hadoop.hbase.replication.ReplicationEndpoint) Put(org.apache.hadoop.hbase.client.Put)

Example 42 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In the class TestAppendTimeRange, method testHTableInterfaceMethods:

/**
 * Exercises append() and batch() through the Table interface, verifying that
 * the time ranges set on Append reach the coprocessor (MyObserver) and that
 * appended values accumulate on the row as expected.
 */
@Test
public void testHTableInterfaceMethods() throws Exception {
    try (Table table = util.createTable(TableName.valueOf(name.getMethodName()), TEST_FAMILY)) {
        // Seed the row, then rewrite it under a controlled clock value.
        table.put(new Put(ROW).addColumn(TEST_FAMILY, QUAL, VALUE));
        long now = EnvironmentEdgeManager.currentTime();
        mee.setValue(now);
        table.put(new Put(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("a")));
        checkRowValue(table, ROW, Bytes.toBytes("a"));

        // Single append carrying an explicit time range.
        now = EnvironmentEdgeManager.currentTime();
        mee.setValue(now);
        TimeRange range10 = TimeRange.between(1, now + 10);
        Append singleAppend = new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("b"));
        singleAppend.setTimeRange(range10.getMin(), range10.getMax());
        table.append(singleAppend);
        checkRowValue(table, ROW, Bytes.toBytes("ab"));
        assertEquals(MyObserver.tr10.getMin(), range10.getMin());
        assertEquals(MyObserver.tr10.getMax(), range10.getMax());

        // Batched appends, both carrying the same time range.
        now = EnvironmentEdgeManager.currentTime();
        mee.setValue(now);
        TimeRange range2 = TimeRange.between(1, now + 20);
        Append firstAppend = new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c"));
        firstAppend.setTimeRange(range2.getMin(), range2.getMax());
        Append secondAppend = new Append(ROW).addColumn(TEST_FAMILY, QUAL, Bytes.toBytes("c"));
        secondAppend.setTimeRange(range2.getMin(), range2.getMax());
        List<Row> actions = Arrays.<Row>asList(firstAppend, secondAppend);
        Object[] batchResults = new Object[actions.size()];
        table.batch(actions, batchResults);
        assertEquals(MyObserver.tr2.getMin(), range2.getMin());
        assertEquals(MyObserver.tr2.getMax(), range2.getMax());
        for (Object batchResult : batchResults) {
            assertTrue(batchResult instanceof Result);
        }
        checkRowValue(table, ROW, Bytes.toBytes("abcc"));
    }
}
Also used : TimeRange(org.apache.hadoop.hbase.io.TimeRange) Table(org.apache.hadoop.hbase.client.Table) Append(org.apache.hadoop.hbase.client.Append) Row(org.apache.hadoop.hbase.client.Row) Put(org.apache.hadoop.hbase.client.Put) Result(org.apache.hadoop.hbase.client.Result) Test(org.junit.Test)

Example 43 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In the class RSRpcServices, method checkAndMutate:

/**
 * Builds and executes an atomic CheckAndMutate from the supplied proto actions.
 * <p>
 * Decodes each action into a Put/Delete/Increment/Append (Gets are rejected),
 * applying cell-size and space-quota checks along the way, then runs the
 * combined CheckAndMutate through the coprocessor pre/post hooks and the
 * region itself.
 *
 * @param region the target region
 * @param actions the proto actions to decode; every one must be a mutation
 * @param cellScanner carries the cell data referenced by the actions
 * @param condition the condition guarding the mutations
 * @param nonceGroup client nonce group used for idempotence
 * @param spaceQuotaEnforcement active space-quota policies to enforce
 * @return the result of the check-and-mutate operation
 * @throws IOException if an action is a Get, has an invalid type, or the
 *         region operation fails
 */
private CheckAndMutateResult checkAndMutate(HRegion region, List<ClientProtos.Action> actions, CellScanner cellScanner, Condition condition, long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
    int decodedCount = 0;
    try {
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        List<Mutation> mutations = new ArrayList<>();
        long nonce = HConstants.NO_NONCE;
        for (ClientProtos.Action action : actions) {
            if (action.hasGet()) {
                throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
            }
            MutationProto proto = action.getMutation();
            MutationType mutateType = proto.getMutateType();
            switch(mutateType) {
                case PUT: {
                    Put put = ProtobufUtil.toPut(proto, cellScanner);
                    ++decodedCount;
                    checkCellSizeLimit(region, put);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
                    mutations.add(put);
                    break;
                }
                case DELETE: {
                    Delete delete = ProtobufUtil.toDelete(proto, cellScanner);
                    ++decodedCount;
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(delete);
                    mutations.add(delete);
                    break;
                }
                case INCREMENT: {
                    Increment increment = ProtobufUtil.toIncrement(proto, cellScanner);
                    // Increments are non-idempotent, so they carry the nonce.
                    nonce = proto.hasNonce() ? proto.getNonce() : HConstants.NO_NONCE;
                    ++decodedCount;
                    checkCellSizeLimit(region, increment);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(increment);
                    mutations.add(increment);
                    break;
                }
                case APPEND: {
                    Append append = ProtobufUtil.toAppend(proto, cellScanner);
                    // Appends are non-idempotent, so they carry the nonce.
                    nonce = proto.hasNonce() ? proto.getNonce() : HConstants.NO_NONCE;
                    ++decodedCount;
                    checkCellSizeLimit(region, append);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(append);
                    mutations.add(append);
                    break;
                }
                default:
                    throw new DoNotRetryIOException("invalid mutation type : " + mutateType);
            }
        }
        if (mutations.isEmpty()) {
            return new CheckAndMutateResult(true, null);
        }
        CheckAndMutate checkAndMutate = ProtobufUtil.toCheckAndMutate(condition, mutations);
        CheckAndMutateResult result = null;
        if (region.getCoprocessorHost() != null) {
            result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
        }
        if (result == null) {
            result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
            if (region.getCoprocessorHost() != null) {
                result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
            }
        }
        return result;
    } finally {
        // Consume the cells of any actions we never decoded so the scanner
        // stays positioned, even if the malformed cells are not skipped.
        for (int i = decodedCount; i < actions.size(); ++i) {
            skipCellsForMutation(actions.get(i), cellScanner);
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) ArrayList(java.util.ArrayList) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) CheckAndMutate(org.apache.hadoop.hbase.client.CheckAndMutate) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) Put(org.apache.hadoop.hbase.client.Put) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)

Example 44 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In the class RequestConverter, method buildMultiRequest:

/**
 * Builds an atomic MultiRequest for a set of row mutations, optionally guarded
 * by a condition. The nonce group is attached to the request only when at
 * least one mutation (Increment/Append) actually consumed the nonce.
 */
private static ClientProtos.MultiRequest buildMultiRequest(final byte[] regionName, final RowMutations rowMutations, final Condition condition, long nonceGroup, long nonce) throws IOException {
    RegionAction.Builder regionActionBuilder = getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
    // All mutations in a RowMutations must be applied atomically.
    regionActionBuilder.setAtomic(true);
    boolean nonceConsumed = false;
    ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
    MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
    for (Mutation mutation : rowMutations.getMutations()) {
        mutationBuilder.clear();
        MutationProto mutationProto;
        // Increment and Append are non-idempotent, so they carry the nonce.
        if (mutation instanceof Increment || mutation instanceof Append) {
            mutationProto = ProtobufUtil.toMutation(getMutationType(mutation), mutation, mutationBuilder, nonce);
            nonceConsumed = true;
        } else {
            mutationProto = ProtobufUtil.toMutation(getMutationType(mutation), mutation, mutationBuilder);
        }
        actionBuilder.clear();
        regionActionBuilder.addAction(actionBuilder.setMutation(mutationProto).build());
    }
    if (condition != null) {
        regionActionBuilder.setCondition(condition);
    }
    MultiRequest.Builder multiRequestBuilder = MultiRequest.newBuilder();
    if (nonceConsumed) {
        multiRequestBuilder.setNonceGroup(nonceGroup);
    }
    return multiRequestBuilder.addRegionAction(regionActionBuilder.build()).build();
}
Also used : Action(org.apache.hadoop.hbase.client.Action) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Append(org.apache.hadoop.hbase.client.Append) MultiRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest) Increment(org.apache.hadoop.hbase.client.Increment) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Mutation(org.apache.hadoop.hbase.client.Mutation) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto)

Example 45 with Append

use of org.apache.hadoop.hbase.client.Append in project hbase by apache.

In the class ProtobufUtil, method toMutation:

/**
 * Converts a client Mutation into a MutationProto, attaching the nonce when
 * present and, for Increment/Append, the operation's time range. All cells in
 * the mutation's family map are copied into the proto; delete markers embedded
 * in PUTs and all DELETE cells additionally get a delete type.
 *
 * @param type the mutation type the proto should declare
 * @param mutation the mutation to convert
 * @param builder a builder to populate (may be reused by the caller)
 * @param nonce nonce to set, or HConstants.NO_NONCE to omit it
 * @return the built MutationProto
 * @throws IOException if conversion of a delete type fails
 */
public static MutationProto toMutation(final MutationType type, final Mutation mutation, MutationProto.Builder builder, long nonce) throws IOException {
    builder = getMutationBuilderAndSetCommonFields(type, mutation, builder);
    if (nonce != HConstants.NO_NONCE) {
        builder.setNonce(nonce);
    }
    // Only Increment and Append carry a time range; the branches are
    // mutually exclusive.
    if (type == MutationType.INCREMENT) {
        builder.setTimeRange(ProtobufUtil.toTimeRange(((Increment) mutation).getTimeRange()));
    } else if (type == MutationType.APPEND) {
        builder.setTimeRange(ProtobufUtil.toTimeRange(((Append) mutation).getTimeRange()));
    }
    ColumnValue.Builder familyBuilder = ColumnValue.newBuilder();
    QualifierValue.Builder qualifierBuilder = QualifierValue.newBuilder();
    for (Map.Entry<byte[], List<Cell>> familyEntry : mutation.getFamilyCellMap().entrySet()) {
        familyBuilder.clear();
        familyBuilder.setFamily(UnsafeByteOperations.unsafeWrap(familyEntry.getKey()));
        for (Cell cell : familyEntry.getValue()) {
            qualifierBuilder.clear();
            qualifierBuilder.setQualifier(UnsafeByteOperations.unsafeWrap(cell.getQualifierArray(), cell.getQualifierOffset(), cell.getQualifierLength()));
            qualifierBuilder.setValue(UnsafeByteOperations.unsafeWrap(cell.getValueArray(), cell.getValueOffset(), cell.getValueLength()));
            qualifierBuilder.setTimestamp(cell.getTimestamp());
            // DELETE mutations and delete markers smuggled inside PUTs need an
            // explicit delete type on the wire.
            if (type == MutationType.DELETE || (type == MutationType.PUT && CellUtil.isDelete(cell))) {
                KeyValue.Type kvType = KeyValue.Type.codeToType(cell.getTypeByte());
                qualifierBuilder.setDeleteType(toDeleteType(kvType));
            }
            familyBuilder.addQualifierValue(qualifierBuilder.build());
        }
        builder.addColumnValue(familyBuilder.build());
    }
    return builder.build();
}
Also used : Append(org.apache.hadoop.hbase.client.Append) KeyValue(org.apache.hadoop.hbase.KeyValue) Increment(org.apache.hadoop.hbase.client.Increment) QualifierValue(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue) ColumnValue(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue) ArrayList(java.util.ArrayList) List(java.util.List) Map(java.util.Map) HashMap(java.util.HashMap) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell)

Aggregations

Append (org.apache.hadoop.hbase.client.Append)62 Test (org.junit.Test)31 Result (org.apache.hadoop.hbase.client.Result)26 Increment (org.apache.hadoop.hbase.client.Increment)25 Put (org.apache.hadoop.hbase.client.Put)23 IOException (java.io.IOException)17 Get (org.apache.hadoop.hbase.client.Get)17 Delete (org.apache.hadoop.hbase.client.Delete)16 Table (org.apache.hadoop.hbase.client.Table)15 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)10 TableName (org.apache.hadoop.hbase.TableName)10 RowMutations (org.apache.hadoop.hbase.client.RowMutations)10 Cell (org.apache.hadoop.hbase.Cell)9 CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)8 Mutation (org.apache.hadoop.hbase.client.Mutation)7 ArrayList (java.util.ArrayList)5 CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate)5 MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto)5 ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString)5 List (java.util.List)4