Example 11 with MutationType

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.

From class RSRpcServices, the method mutateRows:

/**
   * Mutate a list of rows atomically.
   *
   * @param region the region to mutate
   * @param actions the put/delete actions to apply as a single atomic unit
   * @param cellScanner if non-null, the mutation data -- the Cell content.
   * @param builder accumulates one empty ResultOrException per operation
   * @throws IOException if the atomic mutation fails
   */
private void mutateRows(final Region region, final List<ClientProtos.Action> actions, final CellScanner cellScanner, RegionActionResult.Builder builder) throws IOException {
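    // If the server is above its global memstore limit, block and flush the biggest
    // memstores before taking on more writes (skipped for meta regions).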
    if (!region.getRegionInfo().isMetaTable()) {
        regionServer.cacheFlusher.reclaimMemStoreMemory();
    }
    RowMutations rm = null;
    int i = 0;
    ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder = ClientProtos.ResultOrException.newBuilder();
    for (ClientProtos.Action action : actions) {
        if (action.hasGet()) {
            throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
        }
        MutationType type = action.getMutation().getMutateType();
        if (rm == null) {
            rm = new RowMutations(action.getMutation().getRow().toByteArray(), actions.size());
        }
        switch(type) {
            case PUT:
                rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
                break;
            case DELETE:
                rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
                break;
            default:
                throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
        }
        // To unify the response format with doNonAtomicRegionMutation and read through client's
        // AsyncProcess we have to add an empty result instance per operation
        resultOrExceptionOrBuilder.clear();
        resultOrExceptionOrBuilder.setIndex(i++);
        builder.addResultOrException(resultOrExceptionOrBuilder.build());
    }
    region.mutateRow(rm);
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) RowMutations(org.apache.hadoop.hbase.client.RowMutations)
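
For context, this server-side path is roughly what a client reaches through Table.mutateRow. Below is a minimal client-side sketch; the connection defaults, table name "t1", row key, and column names are hypothetical, not taken from the example:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class AtomicRowMutationSketch {
    public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row-1"); // hypothetical row key
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) { // hypothetical table
            // All mutations must target the same row; the server applies them atomically.
            RowMutations rm = new RowMutations(row);
            rm.add(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("a"), Bytes.toBytes("v")));
            rm.add(new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("b")));
            table.mutateRow(rm);
        }
    }
}

Note that a Get mixed into the batch would trip the DoNotRetryIOException above: only Put and Delete are accepted in an atomic row mutation.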

Example 12 with MutationType

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.

From class RSRpcServices, the method mutate:

/**
   * Mutate data in a table.
   *
   * @param rpcc the RPC controller
   * @param request the mutate request
   * @throws ServiceException wrapping any IOException raised while mutating
   */
@Override
public MutateResponse mutate(final RpcController rpcc, final MutateRequest request) throws ServiceException {
    // rpc controller is how we bring in data via the back door; it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    OperationQuota quota = null;
    RpcCallContext context = RpcServer.getCurrentCall();
    // Clear scanner so we are not holding on to reference across call.
    if (controller != null) {
        controller.setCellScanner(null);
    }
    try {
        checkOpen();
        requestCount.increment();
        rpcMutateRequestCount.increment();
        Region region = getRegion(request.getRegion());
        MutateResponse.Builder builder = MutateResponse.newBuilder();
        MutationProto mutation = request.getMutation();
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
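        // Nonces let the server detect client retries of non-idempotent ops (append/increment).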
        long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
        Result r = null;
        Boolean processed = null;
        MutationType type = mutation.getMutateType();
        quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
        switch(type) {
            case APPEND:
                // TODO: this doesn't actually check anything.
                r = append(region, quota, mutation, cellScanner, nonceGroup);
                break;
            case INCREMENT:
                // TODO: this doesn't actually check anything.
                r = increment(region, quota, mutation, cellScanner, nonceGroup);
                break;
            case PUT:
                Put put = ProtobufUtil.toPut(mutation, cellScanner);
                quota.addMutation(put);
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    if (region.getCoprocessorHost() != null) {
                        processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier, compareOp, comparator, put);
                    }
                    if (processed == null) {
                        boolean result = region.checkAndMutate(row, family, qualifier, compareOp, comparator, put, true);
                        if (region.getCoprocessorHost() != null) {
                            result = region.getCoprocessorHost().postCheckAndPut(row, family, qualifier, compareOp, comparator, put, result);
                        }
                        processed = result;
                    }
                } else {
                    region.put(put);
                    processed = Boolean.TRUE;
                }
                break;
            case DELETE:
                Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
                quota.addMutation(delete);
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    if (region.getCoprocessorHost() != null) {
                        processed = region.getCoprocessorHost().preCheckAndDelete(row, family, qualifier, compareOp, comparator, delete);
                    }
                    if (processed == null) {
                        boolean result = region.checkAndMutate(row, family, qualifier, compareOp, comparator, delete, true);
                        if (region.getCoprocessorHost() != null) {
                            result = region.getCoprocessorHost().postCheckAndDelete(row, family, qualifier, compareOp, comparator, delete, result);
                        }
                        processed = result;
                    }
                } else {
                    region.delete(delete);
                    processed = Boolean.TRUE;
                }
                break;
            default:
                throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
        }
        if (processed != null) {
            builder.setProcessed(processed.booleanValue());
        }
        boolean clientCellBlockSupported = isClientCellBlockSupport(context);
        addResult(builder, r, controller, clientCellBlockSupported);
        if (clientCellBlockSupported) {
            addSize(context, r, null);
        }
        return builder.build();
    } catch (IOException ie) {
        regionServer.checkFileSystem();
        throw new ServiceException(ie);
    } finally {
        if (quota != null) {
            quota.close();
        }
    }
}
Also used : RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) Condition(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition) Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) CellScanner(org.apache.hadoop.hbase.CellScanner) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) Put(org.apache.hadoop.hbase.client.Put) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) MutateResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
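
The conditional PUT and DELETE branches are the server side of the client's checkAndPut/checkAndDelete. Below is a minimal sketch of a checkAndPut call that would populate request.getCondition(); the table and column names are hypothetical, and the older CompareOp-era Table API is assumed:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutSketch {
    public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row-1"); // hypothetical row key
        byte[] cf = Bytes.toBytes("cf");     // hypothetical column family
        byte[] q = Bytes.toBytes("q");       // hypothetical qualifier
        try (Connection conn = ConnectionFactory.createConnection();
             Table table = conn.getTable(TableName.valueOf("t1"))) { // hypothetical table
            Put put = new Put(row).addColumn(cf, q, Bytes.toBytes("new"));
            // The condition (row/family/qualifier/value) travels in the request and is
            // evaluated by region.checkAndMutate in the PUT branch above; the returned
            // boolean is what the server sets as 'processed' in the MutateResponse.
            boolean applied = table.checkAndPut(row, cf, q, Bytes.toBytes("expected"), put);
            System.out.println("checkAndPut applied: " + applied);
        }
    }
}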

Example 13 with MutationType

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.

From class MultiThreadedAction, the method parseMutateInfo:

// Parse mutate info into a map of <column name> => <update action>
private Map<String, MutationType> parseMutateInfo(byte[] mutateInfo) {
    Map<String, MutationType> mi = new HashMap<>();
    if (mutateInfo != null) {
        String mutateInfoStr = Bytes.toString(mutateInfo);
        String[] mutations = mutateInfoStr.split("#");
        for (String mutation : mutations) {
            if (mutation.isEmpty()) {
                continue;
            }
            Preconditions.checkArgument(mutation.contains(":"), "Invalid mutation info " + mutation);
            int p = mutation.indexOf(":");
            String column = mutation.substring(0, p);
            MutationType type = MutationType.valueOf(Integer.parseInt(mutation.substring(p + 1)));
            mi.put(column, type);
        }
    }
    return mi;
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) HashMap(java.util.HashMap)
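
So the mutate info wire format is a '#'-separated list of <column>:<protobuf type number> pairs. For illustration, here is a hypothetical encoder (the helper name is invented) that parseMutateInfo would round-trip:

import java.util.LinkedHashMap;
import java.util.Map;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;

public class MutateInfoSketch {
    // Hypothetical inverse of parseMutateInfo: joins "<column>:<type number>" pairs with '#'.
    static String encodeMutateInfo(Map<String, MutationType> mi) {
        StringBuilder sb = new StringBuilder();
        for (Map.Entry<String, MutationType> e : mi.entrySet()) {
            if (sb.length() > 0) {
                sb.append('#');
            }
            sb.append(e.getKey()).append(':').append(e.getValue().getNumber());
        }
        return sb.toString();
    }

    public static void main(String[] args) {
        Map<String, MutationType> mi = new LinkedHashMap<>();
        mi.put("c1", MutationType.APPEND);
        mi.put("c2", MutationType.DELETE);
        // Prints "c1:0#c2:3" with the stock proto numbering (APPEND = 0, DELETE = 3).
        System.out.println(encodeMutateInfo(mi));
    }
}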

Example 14 with MutationType

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType in project hbase by apache.

From class MultiThreadedAction, the method verifyResultAgainstDataGenerator:

/**
   * Verifies the result from get or scan using the dataGenerator (that was presumably
   * also used to generate said result).
   * @param result the result to verify
   * @param verifyValues verify that values in the result make sense for row/cf/column combination
   * @param verifyCfAndColumnIntegrity verify that cf/column set in the result is complete. Note
   *                                   that to use this multiPut should be used, or verification
   *                                   has to happen after writes, otherwise there can be races.
   * @return true if the result checks out; false (after logging details) on the first mismatch
   */
public boolean verifyResultAgainstDataGenerator(Result result, boolean verifyValues, boolean verifyCfAndColumnIntegrity) {
    String rowKeyStr = Bytes.toString(result.getRow());
    // See if we have any data at all.
    if (result.isEmpty()) {
        LOG.error("Error checking data for key [" + rowKeyStr + "], no data returned");
        printLocations(result);
        return false;
    }
    if (!verifyValues && !verifyCfAndColumnIntegrity) {
        // as long as we have something, we are good.
        return true;
    }
    // See if we have all the CFs.
    byte[][] expectedCfs = dataGenerator.getColumnFamilies();
    if (verifyCfAndColumnIntegrity && (expectedCfs.length != result.getMap().size())) {
        LOG.error("Error checking data for key [" + rowKeyStr + "], bad family count: " + result.getMap().size());
        printLocations(result);
        return false;
    }
    // Verify each column family from get in the result.
    for (byte[] cf : result.getMap().keySet()) {
        String cfStr = Bytes.toString(cf);
        Map<byte[], byte[]> columnValues = result.getFamilyMap(cf);
        if (columnValues == null) {
            LOG.error("Error checking data for key [" + rowKeyStr + "], no data for family [" + cfStr + "]]");
            printLocations(result);
            return false;
        }
        Map<String, MutationType> mutateInfo = null;
        if (verifyCfAndColumnIntegrity || verifyValues) {
            if (!columnValues.containsKey(MUTATE_INFO)) {
                LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + Bytes.toString(MUTATE_INFO) + "]; value is not found");
                printLocations(result);
                return false;
            }
            long cfHash = Arrays.hashCode(cf);
            // Verify deleted columns, and make up column counts if deleted
            byte[] mutateInfoValue = columnValues.remove(MUTATE_INFO);
            mutateInfo = parseMutateInfo(mutateInfoValue);
            for (Map.Entry<String, MutationType> mutate : mutateInfo.entrySet()) {
                if (mutate.getValue() == MutationType.DELETE) {
                    byte[] column = Bytes.toBytes(mutate.getKey());
                    long columnHash = Arrays.hashCode(column);
                    long hashCode = cfHash + columnHash;
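                    // Per the generator's convention, a recorded DELETE is expected to have
                    // removed the column only when (cfHash + columnHash) is even; the hash value
                    // is put back so the later column-set and value checks still line up.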
                    if (hashCode % 2 == 0) {
                        if (columnValues.containsKey(column)) {
                            LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + mutate.getKey() + "]; should be deleted");
                            printLocations(result);
                            return false;
                        }
                        byte[] hashCodeBytes = Bytes.toBytes(hashCode);
                        columnValues.put(column, hashCodeBytes);
                    }
                }
            }
            // Verify increment
            if (!columnValues.containsKey(INCREMENT)) {
                LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + Bytes.toString(INCREMENT) + "]; value is not found");
                printLocations(result);
                return false;
            }
            long currentValue = Bytes.toLong(columnValues.remove(INCREMENT));
            if (verifyValues) {
                long amount = mutateInfo.isEmpty() ? 0 : cfHash;
                long originalValue = Arrays.hashCode(result.getRow());
                long extra = currentValue - originalValue;
                if (extra != 0 && (amount == 0 || extra % amount != 0)) {
                    LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [increment], extra [" + extra + "], amount [" + amount + "]");
                    printLocations(result);
                    return false;
                }
                if (amount != 0 && extra != amount) {
                    LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [increment], incremented [" + (extra / amount) + "] times");
                }
            }
            // See if we have correct columns.
            if (verifyCfAndColumnIntegrity && !dataGenerator.verify(result.getRow(), cf, columnValues.keySet())) {
                String colsStr = "";
                for (byte[] col : columnValues.keySet()) {
                    if (colsStr.length() > 0) {
                        colsStr += ", ";
                    }
                    colsStr += "[" + Bytes.toString(col) + "]";
                }
                LOG.error("Error checking data for key [" + rowKeyStr + "], bad columns for family [" + cfStr + "]: " + colsStr);
                printLocations(result);
                return false;
            }
            // See if values check out.
            if (verifyValues) {
                for (Map.Entry<byte[], byte[]> kv : columnValues.entrySet()) {
                    String column = Bytes.toString(kv.getKey());
                    MutationType mutation = mutateInfo.get(column);
                    boolean verificationNeeded = true;
                    byte[] bytes = kv.getValue();
                    if (mutation != null) {
                        boolean mutationVerified = true;
                        long columnHash = Arrays.hashCode(kv.getKey());
                        long hashCode = cfHash + columnHash;
                        byte[] hashCodeBytes = Bytes.toBytes(hashCode);
                        if (mutation == MutationType.APPEND) {
                            int offset = bytes.length - hashCodeBytes.length;
                            mutationVerified = offset > 0 && Bytes.equals(hashCodeBytes, 0, hashCodeBytes.length, bytes, offset, hashCodeBytes.length);
                            if (mutationVerified) {
                                int n = 1;
                                while (true) {
                                    int newOffset = offset - hashCodeBytes.length;
                                    if (newOffset < 0 || !Bytes.equals(hashCodeBytes, 0, hashCodeBytes.length, bytes, newOffset, hashCodeBytes.length)) {
                                        break;
                                    }
                                    offset = newOffset;
                                    n++;
                                }
                                if (n > 1) {
                                    LOG.warn("Warning checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + column + "], appended [" + n + "] times");
                                }
                                byte[] dest = new byte[offset];
                                System.arraycopy(bytes, 0, dest, 0, offset);
                                bytes = dest;
                            }
                        } else if (hashCode % 2 == 0) {
                            // checkAndPut
                            mutationVerified = Bytes.equals(bytes, hashCodeBytes);
                            verificationNeeded = false;
                        }
                        if (!mutationVerified) {
                            LOG.error("Error checking data for key [" + rowKeyStr + "], mutation checking failed for column family [" + cfStr + "], column [" + column + "]; mutation [" + mutation + "], hashCode [" + hashCode + "], verificationNeeded [" + verificationNeeded + "]");
                            printLocations(result);
                            return false;
                        }
                    }
                    // end of mutation checking
                    if (verificationNeeded && !dataGenerator.verify(result.getRow(), cf, kv.getKey(), bytes)) {
                        LOG.error("Error checking data for key [" + rowKeyStr + "], column family [" + cfStr + "], column [" + column + "], mutation [" + mutation + "]; value of length " + bytes.length);
                        printLocations(result);
                        return false;
                    }
                }
            }
        }
    }
    return true;
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) HashMap(java.util.HashMap) Map(java.util.Map)
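
One detail of Example 14 worth isolating: the APPEND branch peels the per-column hash suffix off the tail of the value, counting how many appends landed, before handing the remaining prefix to the data generator. Here is a standalone sketch of that counting loop, using plain JDK types and simplified so that, unlike the original, the suffix may consume the entire value:

import java.util.Arrays;

public class AppendCountSketch {
    // Counts how many times 'suffix' repeats at the end of 'value'; returns 0 if the
    // value does not end with 'suffix' at all.
    static int countTrailingRepeats(byte[] value, byte[] suffix) {
        int n = 0;
        int offset = value.length - suffix.length;
        while (offset >= 0
                && Arrays.equals(Arrays.copyOfRange(value, offset, offset + suffix.length), suffix)) {
            n++;
            offset -= suffix.length;
        }
        return n;
    }

    public static void main(String[] args) {
        byte[] suffix = {1, 2};
        byte[] value = {9, 1, 2, 1, 2}; // one base byte followed by two appended copies
        System.out.println(countTrailingRepeats(value, suffix)); // prints 2
    }
}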

Aggregations

MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 14 uses
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 12 uses
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6 uses
ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell): 5 uses
Cell (org.apache.hadoop.hbase.Cell): 5 uses
Delete (org.apache.hadoop.hbase.client.Delete): 5 uses
Put (org.apache.hadoop.hbase.client.Put): 5 uses
ColumnValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue): 5 uses
QualifierValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue): 5 uses
ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos): 4 uses
MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto): 4 uses
Mutation (org.apache.hadoop.hbase.client.Mutation): 3 uses
Action (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action): 3 uses
RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction): 3 uses
ResultOrException (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException): 3 uses
IOException (java.io.IOException): 2 uses
InterruptedIOException (java.io.InterruptedIOException): 2 uses
HashMap (java.util.HashMap): 2 uses
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2 uses
Action (org.apache.hadoop.hbase.client.Action): 2 uses