Search in sources :

Example 11 with CheckAndMutateResult

use of org.apache.hadoop.hbase.client.CheckAndMutateResult in project hbase by apache.

In the class TestHRegion, the method testCheckAndMutateWithWrongValue:

/**
 * Verifies that a failed condition check (expected value does not match the stored value)
 * makes {@code checkAndMutate} report failure ({@code isSuccess() == false}) with a null
 * result, for both Put and Delete actions, with plain byte values and with
 * BigDecimal-encoded values.
 */
@Test
public void testCheckAndMutateWithWrongValue() throws IOException {
    byte[] row1 = Bytes.toBytes("row1");
    byte[] fam1 = Bytes.toBytes("fam1");
    byte[] qf1 = Bytes.toBytes("qualifier");
    byte[] val1 = Bytes.toBytes("value1");
    byte[] val2 = Bytes.toBytes("value2");
    BigDecimal bd1 = new BigDecimal(Double.MAX_VALUE);
    BigDecimal bd2 = new BigDecimal(Double.MIN_VALUE);
    // Setting up region
    this.region = initHRegion(tableName, method, CONF, fam1);
    // Putting data in key
    Put put = new Put(row1);
    put.addColumn(fam1, qf1, val1);
    region.put(put);
    // checkAndPut with wrong value
    CheckAndMutateResult res = region.checkAndMutate(CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(put));
    assertFalse(res.isSuccess());
    assertNull(res.getResult());
    // checkAndDelete with wrong value
    Delete delete = new Delete(row1);
    delete.addFamily(fam1);
    // BUGFIX: this branch previously passed `put` again, leaving `delete` unused and the
    // checkAndDelete path untested; pass the Delete action as the parallel BigDecimal
    // case below already does.
    res = region.checkAndMutate(CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, val2).build(delete));
    assertFalse(res.isSuccess());
    assertNull(res.getResult());
    // Putting data in key
    put = new Put(row1);
    put.addColumn(fam1, qf1, Bytes.toBytes(bd1));
    region.put(put);
    // checkAndPut with wrong value
    res = region.checkAndMutate(CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, Bytes.toBytes(bd2)).build(put));
    assertFalse(res.isSuccess());
    assertNull(res.getResult());
    // checkAndDelete with wrong value
    delete = new Delete(row1);
    delete.addFamily(fam1);
    res = region.checkAndMutate(CheckAndMutate.newBuilder(row1).ifMatches(fam1, qf1, CompareOperator.EQUAL, Bytes.toBytes(bd2)).build(delete));
    assertFalse(res.isSuccess());
    assertNull(res.getResult());
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) BigDecimal(java.math.BigDecimal) Put(org.apache.hadoop.hbase.client.Put) Test(org.junit.Test)

Example 12 with CheckAndMutateResult

use of org.apache.hadoop.hbase.client.CheckAndMutateResult in project hbase by apache.

In the class HRegion, the method checkAndMutateInternal:

/**
 * Core check-and-mutate implementation: under the row lock, reads the row's current value,
 * evaluates the condition (either a Filter or a single column/comparator check), and only on
 * a match applies the pending action (a single Mutation or a RowMutations) atomically.
 *
 * @param checkAndMutate condition plus the action to apply when the condition matches
 * @param nonceGroup nonce group passed through to the mutation for idempotency
 * @param nonce nonce passed through to the mutation for idempotency
 * @return whether the condition matched, plus the action's Result when it ran
 * @throws IOException if the read, the check, or the mutation fails
 */
private CheckAndMutateResult checkAndMutateInternal(CheckAndMutate checkAndMutate, long nonceGroup, long nonce) throws IOException {
    byte[] row = checkAndMutate.getRow();
    Filter filter = null;
    byte[] family = null;
    byte[] qualifier = null;
    CompareOperator op = null;
    ByteArrayComparable comparator = null;
    // A CheckAndMutate carries exactly one of two condition forms: a Filter, or a
    // (family, qualifier, op, value) compare. Unpack whichever one is present.
    if (checkAndMutate.hasFilter()) {
        filter = checkAndMutate.getFilter();
    } else {
        family = checkAndMutate.getFamily();
        qualifier = checkAndMutate.getQualifier();
        op = checkAndMutate.getCompareOp();
        comparator = new BinaryComparator(checkAndMutate.getValue());
    }
    TimeRange timeRange = checkAndMutate.getTimeRange();
    // The action is either a single Mutation or a RowMutations batch; exactly one of
    // these two locals ends up non-null.
    Mutation mutation = null;
    RowMutations rowMutations = null;
    if (checkAndMutate.getAction() instanceof Mutation) {
        mutation = (Mutation) checkAndMutate.getAction();
    } else {
        rowMutations = (RowMutations) checkAndMutate.getAction();
    }
    // Validate the action targets the same row the condition checks.
    if (mutation != null) {
        checkMutationType(mutation);
        checkRow(mutation, row);
    } else {
        checkRow(rowMutations, row);
    }
    checkReadOnly();
    // TODO, add check for value length also move this check to the client
    checkResources();
    startRegionOperation();
    try {
        // Build the Get used to read the row's current state for the condition check.
        Get get = new Get(row);
        if (family != null) {
            checkFamily(family);
            get.addColumn(family, qualifier);
        }
        if (filter != null) {
            get.setFilter(filter);
        }
        if (timeRange != null) {
            get.setTimeRange(timeRange.getMin(), timeRange.getMax());
        }
        // Lock row - note that doBatchMutate will relock this row if called
        checkRow(row, "doCheckAndRowMutate");
        RowLock rowLock = getRowLock(get.getRow(), false, null);
        try {
            // Give coprocessors a chance to short-circuit the whole operation now that
            // the row lock is held.
            if (this.getCoprocessorHost() != null) {
                CheckAndMutateResult result = getCoprocessorHost().preCheckAndMutateAfterRowLock(checkAndMutate);
                if (result != null) {
                    return result;
                }
            }
            // NOTE: We used to wait here until mvcc caught up: mvcc.await();
            // Supposition is that now all changes are done under row locks, then when we go to read,
            // we'll get the latest on this row.
            boolean matches = false;
            // Timestamp of the observed cell; used below to keep the write's timestamp
            // at or above what the condition check saw.
            long cellTs = 0;
            try (RegionScanner scanner = getScanner(new Scan(get))) {
                // NOTE: Please don't use HRegion.get() instead,
                // because it will copy cells to heap. See HBASE-26036
                List<Cell> result = new ArrayList<>(1);
                scanner.next(result);
                if (filter != null) {
                    // Filter-based condition: any surviving cell means a match.
                    if (!result.isEmpty()) {
                        matches = true;
                        cellTs = result.get(0).getTimestamp();
                    }
                } else {
                    // Compare-based condition. A null/empty expected value means
                    // "cell absent or empty".
                    boolean valueIsNull = comparator.getValue() == null || comparator.getValue().length == 0;
                    if (result.isEmpty() && valueIsNull) {
                        // No cell and expecting absence: matches unless op is NOT_EQUAL.
                        matches = op != CompareOperator.NOT_EQUAL;
                    } else if (result.size() > 0 && valueIsNull) {
                        // Cell exists but expected value is empty: match iff the stored
                        // value's emptiness agrees with the operator's polarity.
                        matches = (result.get(0).getValueLength() == 0) == (op != CompareOperator.NOT_EQUAL);
                        cellTs = result.get(0).getTimestamp();
                    } else if (result.size() == 1) {
                        // Normal case: compare the single stored cell's value.
                        Cell kv = result.get(0);
                        cellTs = kv.getTimestamp();
                        int compareResult = PrivateCellUtil.compareValue(kv, comparator);
                        matches = matches(op, compareResult);
                    }
                }
            }
            // If matches, perform the mutation or the rowMutations
            if (matches) {
                // We have acquired the row lock already. If the system clock is NOT monotonically
                // non-decreasing (see HBASE-14070) we should make sure that the mutation has a
                // larger timestamp than what was observed via Get. doBatchMutate already does this, but
                // there is no way to pass the cellTs. See HBASE-14054.
                long now = EnvironmentEdgeManager.currentTime();
                // ensure write is not eclipsed
                long ts = Math.max(now, cellTs);
                byte[] byteTs = Bytes.toBytes(ts);
                if (mutation != null) {
                    if (mutation instanceof Put) {
                        updateCellTimestamps(mutation.getFamilyCellMap().values(), byteTs);
                    }
                // And else 'delete' is not needed since it already does a second get, and sets the
                // timestamp from get (see prepareDeleteTimestamps).
                } else {
                    for (Mutation m : rowMutations.getMutations()) {
                        if (m instanceof Put) {
                            updateCellTimestamps(m.getFamilyCellMap().values(), byteTs);
                        }
                    }
                // And else 'delete' is not needed since it already does a second get, and sets the
                // timestamp from get (see prepareDeleteTimestamps).
                }
                // All edits for the given row (across all column families) must happen atomically.
                Result r;
                if (mutation != null) {
                    r = mutate(mutation, true, nonceGroup, nonce).getResult();
                } else {
                    r = mutateRow(rowMutations, nonceGroup, nonce);
                }
                this.checkAndMutateChecksPassed.increment();
                return new CheckAndMutateResult(true, r);
            }
            this.checkAndMutateChecksFailed.increment();
            return new CheckAndMutateResult(false, null);
        } finally {
            rowLock.release();
        }
    } finally {
        closeRegionOperation();
    }
}
Also used : CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) ArrayList(java.util.ArrayList) BinaryComparator(org.apache.hadoop.hbase.filter.BinaryComparator) Put(org.apache.hadoop.hbase.client.Put) RowMutations(org.apache.hadoop.hbase.client.RowMutations) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) CompareOperator(org.apache.hadoop.hbase.CompareOperator) TimeRange(org.apache.hadoop.hbase.io.TimeRange) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) Filter(org.apache.hadoop.hbase.filter.Filter) Get(org.apache.hadoop.hbase.client.Get) Scan(org.apache.hadoop.hbase.client.Scan) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell)

Example 13 with CheckAndMutateResult

use of org.apache.hadoop.hbase.client.CheckAndMutateResult in project hbase by apache.

In the class RSRpcServices, the method mutate:

/**
 * Mutate data in a table. RPC entry point for a single mutation (Append, Increment, Put,
 * Delete), optionally guarded by a condition, in which case it is routed through
 * check-and-mutate.
 *
 * @param rpcc the RPC controller
 * @param request the mutate request
 * @return the response, carrying the processed flag and (for Append/Increment or
 *   conditional mutations) the resulting row
 * @throws ServiceException wrapping any IOException raised while serving the request
 */
@Override
public MutateResponse mutate(final RpcController rpcc, final MutateRequest request) throws ServiceException {
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    OperationQuota quota = null;
    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
    // Clear scanner so we are not holding on to reference across call.
    if (controller != null) {
        controller.setCellScanner(null);
    }
    try {
        checkOpen();
        requestCount.increment();
        rpcMutateRequestCount.increment();
        HRegion region = getRegion(request.getRegion());
        rejectIfInStandByState(region);
        MutateResponse.Builder builder = MutateResponse.newBuilder();
        MutationProto mutation = request.getMutation();
        // Reclaim memstore memory before a user-region write; meta is exempt.
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
        quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
        ActivePolicyEnforcement spaceQuotaEnforcement = getSpaceQuotaManager().getActiveEnforcements();
        if (request.hasCondition()) {
            // Conditional path: delegate to check-and-mutate; the processed flag reflects
            // whether the condition matched.
            CheckAndMutateResult result = checkAndMutate(region, quota, mutation, cellScanner, request.getCondition(), nonceGroup, spaceQuotaEnforcement);
            builder.setProcessed(result.isSuccess());
            boolean clientCellBlockSupported = isClientCellBlockSupport(context);
            addResult(builder, result.getResult(), controller, clientCellBlockSupported);
            if (clientCellBlockSupported) {
                addSize(context, result.getResult(), null);
            }
        } else {
            // Unconditional path: dispatch on the mutation type. Only Put/Delete set the
            // processed flag; Append/Increment return a Result instead.
            Result r = null;
            Boolean processed = null;
            MutationType type = mutation.getMutateType();
            switch(type) {
                case APPEND:
                    // TODO: this doesn't actually check anything.
                    r = append(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement);
                    break;
                case INCREMENT:
                    // TODO: this doesn't actually check anything.
                    r = increment(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement);
                    break;
                case PUT:
                    put(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
                    processed = Boolean.TRUE;
                    break;
                case DELETE:
                    delete(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
                    processed = Boolean.TRUE;
                    break;
                default:
                    throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
            }
            if (processed != null) {
                builder.setProcessed(processed);
            }
            boolean clientCellBlockSupported = isClientCellBlockSupport(context);
            addResult(builder, r, controller, clientCellBlockSupported);
            if (clientCellBlockSupported) {
                addSize(context, r, null);
            }
        }
        return builder.build();
    } catch (IOException ie) {
        server.checkFileSystem();
        throw new ServiceException(ie);
    } finally {
        if (quota != null) {
            quota.close();
        }
    }
}
Also used : RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) ActivePolicyEnforcement(org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) CellScanner(org.apache.hadoop.hbase.CellScanner) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) MutateResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)

Example 14 with CheckAndMutateResult

use of org.apache.hadoop.hbase.client.CheckAndMutateResult in project hbase by apache.

In the class RSRpcServices, the method checkAndMutate:

/**
 * Decodes a batch of protobuf mutation actions into client Mutations, builds a
 * CheckAndMutate with the given condition, and runs it against the region, invoking the
 * pre/post coprocessor hooks around the region call.
 *
 * @param region the region to mutate
 * @param actions the protobuf actions; each must be a mutation (Gets are rejected)
 * @param cellScanner carries the cell payloads for the actions; shared across the RPC
 * @param condition the condition that guards the mutations
 * @param nonceGroup nonce group for idempotency
 * @param spaceQuotaEnforcement active space-quota policies to check each mutation against
 * @return the check-and-mutate result; trivially successful when there are no mutations
 * @throws IOException if decoding, quota checks, or the region operation fails
 */
private CheckAndMutateResult checkAndMutate(HRegion region, List<ClientProtos.Action> actions, CellScanner cellScanner, Condition condition, long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement) throws IOException {
    // Count of actions whose cells were fully consumed from cellScanner; used in the
    // finally block to keep the scanner positioned correctly on early exit.
    int countOfCompleteMutation = 0;
    try {
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        List<Mutation> mutations = new ArrayList<>();
        long nonce = HConstants.NO_NONCE;
        for (ClientProtos.Action action : actions) {
            if (action.hasGet()) {
                throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
            }
            MutationProto mutation = action.getMutation();
            MutationType type = mutation.getMutateType();
            switch(type) {
                case PUT:
                    Put put = ProtobufUtil.toPut(mutation, cellScanner);
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, put);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(put);
                    mutations.add(put);
                    break;
                case DELETE:
                    Delete del = ProtobufUtil.toDelete(mutation, cellScanner);
                    ++countOfCompleteMutation;
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(del);
                    mutations.add(del);
                    break;
                case INCREMENT:
                    Increment increment = ProtobufUtil.toIncrement(mutation, cellScanner);
                    // Increment/Append carry a nonce for idempotent retries.
                    nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, increment);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(increment);
                    mutations.add(increment);
                    break;
                case APPEND:
                    Append append = ProtobufUtil.toAppend(mutation, cellScanner);
                    nonce = mutation.hasNonce() ? mutation.getNonce() : HConstants.NO_NONCE;
                    ++countOfCompleteMutation;
                    checkCellSizeLimit(region, append);
                    spaceQuotaEnforcement.getPolicyEnforcement(region).check(append);
                    mutations.add(append);
                    break;
                default:
                    throw new DoNotRetryIOException("invalid mutation type : " + type);
            }
        }
        if (mutations.size() == 0) {
            // Nothing to do: report success with no result.
            return new CheckAndMutateResult(true, null);
        } else {
            CheckAndMutate checkAndMutate = ProtobufUtil.toCheckAndMutate(condition, mutations);
            CheckAndMutateResult result = null;
            // preCheckAndMutate may supply a result and bypass the region operation.
            if (region.getCoprocessorHost() != null) {
                result = region.getCoprocessorHost().preCheckAndMutate(checkAndMutate);
            }
            if (result == null) {
                result = region.checkAndMutate(checkAndMutate, nonceGroup, nonce);
                if (region.getCoprocessorHost() != null) {
                    result = region.getCoprocessorHost().postCheckAndMutate(checkAndMutate, result);
                }
            }
            return result;
        }
    } finally {
        // Consume the cell payloads of any actions that were not fully decoded (e.g. after
        // an exception) so the shared cellScanner stays positioned for subsequent requests,
        // even if the malformed cells are not skipped.
        for (int i = countOfCompleteMutation; i < actions.size(); ++i) {
            skipCellsForMutation(actions.get(i), cellScanner);
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) ArrayList(java.util.ArrayList) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) CheckAndMutate(org.apache.hadoop.hbase.client.CheckAndMutate) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) Put(org.apache.hadoop.hbase.client.Put) Append(org.apache.hadoop.hbase.client.Append) Increment(org.apache.hadoop.hbase.client.Increment) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)

Example 15 with CheckAndMutateResult

use of org.apache.hadoop.hbase.client.CheckAndMutateResult in project hbase by apache.

In the class RegionCoprocessorHost, the method preCheckAndMutate:

/**
 * Invokes the registered RegionObservers' preCheckAndMutate hooks. Supports Coprocessor
 * 'bypass': when an observer bypasses default processing, the accumulated result is
 * returned to the client instead of running the operation.
 *
 * @param checkAndMutate the CheckAndMutate object
 * @return true or false to return to client if default processing should be bypassed, or null
 *   otherwise
 * @throws IOException if an error occurred on the coprocessor
 */
public CheckAndMutateResult preCheckAndMutate(CheckAndMutate checkAndMutate) throws IOException {
    // Fast path: no coprocessors loaded, nothing to consult.
    if (coprocEnvironments.isEmpty()) {
        return null;
    }
    // Default outcome handed to observers; a failed check with no result.
    final CheckAndMutateResult seed = new CheckAndMutateResult(false, null);
    final boolean canBypass = true;
    ObserverOperationWithResult<RegionObserver, CheckAndMutateResult> op =
        new ObserverOperationWithResult<RegionObserver, CheckAndMutateResult>(regionObserverGetter, seed, canBypass) {

            @Override
            public CheckAndMutateResult call(RegionObserver observer) throws IOException {
                return observer.preCheckAndMutate(this, checkAndMutate, getResult());
            }
        };
    return execOperationWithResult(op);
}
Also used : CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) IOException(java.io.IOException) RegionObserver(org.apache.hadoop.hbase.coprocessor.RegionObserver)

Aggregations

CheckAndMutateResult (org.apache.hadoop.hbase.client.CheckAndMutateResult)20 Put (org.apache.hadoop.hbase.client.Put)13 Test (org.junit.Test)12 Result (org.apache.hadoop.hbase.client.Result)10 Get (org.apache.hadoop.hbase.client.Get)9 Delete (org.apache.hadoop.hbase.client.Delete)8 Mutation (org.apache.hadoop.hbase.client.Mutation)5 RowMutations (org.apache.hadoop.hbase.client.RowMutations)5 IOException (java.io.IOException)4 Append (org.apache.hadoop.hbase.client.Append)4 CheckAndMutate (org.apache.hadoop.hbase.client.CheckAndMutate)4 Increment (org.apache.hadoop.hbase.client.Increment)4 SingleColumnValueFilter (org.apache.hadoop.hbase.filter.SingleColumnValueFilter)4 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)3 FilterList (org.apache.hadoop.hbase.filter.FilterList)3 MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType)3 UncheckedIOException (java.io.UncheckedIOException)2 BigDecimal (java.math.BigDecimal)2 ArrayList (java.util.ArrayList)2 Cell (org.apache.hadoop.hbase.Cell)2