Example 1 with Action

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action in project hbase by apache.

In class RSRpcServices, method doNonAtomicRegionMutation:

/**
   * Run through the regionMutation <code>actions</code> and per Mutation, do the work, and then
   * when done, add an instance of a {@link ResultOrException} that corresponds to each Mutation.
   * @param region the Region the Actions run against
   * @param quota the OperationQuota charged for this call
   * @param actions the RegionAction whose Actions are executed
   * @param cellScanner the Cell data accompanying the mutations, if any
   * @param builder accumulates one ResultOrException per Action
   * @param cellsToReturn  Could be null. May be allocated in this method.  This is what this
   * method returns as a 'result'.
   * @param nonceGroup the nonce group used for Append and Increment operations
   * @param closeCallBack the callback to be used with multigets
   * @param context the current RpcCallContext
   * @return the <code>cellsToReturn</code> list, possibly allocated here, holding the Results
   * whose Cells are sent back in the cell block
   */
private List<CellScannable> doNonAtomicRegionMutation(final Region region, final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner, final RegionActionResult.Builder builder, List<CellScannable> cellsToReturn, long nonceGroup, final RegionScannersCloseCallBack closeCallBack, RpcCallContext context) {
    // Gather up CONTIGUOUS Puts and Deletes in this mutations List.  The idea is that rather than
    // doing them one at a time, we pass them on in a batch.  Be aware that the corresponding
    // ResultOrException instance that matches each Put or Delete is then added down in the
    // doBatchOp call.  Indexes must stay aligned even though the Puts and Deletes are deferred/batched.
    List<ClientProtos.Action> mutations = null;
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    IOException sizeIOE = null;
    Object lastBlock = null;
    ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder();
    boolean hasResultOrException = false;
    for (ClientProtos.Action action : actions.getActionList()) {
        hasResultOrException = false;
        resultOrExceptionBuilder.clear();
        try {
            Result r = null;
            if (context != null && context.isRetryImmediatelySupported() && (context.getResponseCellSize() > maxQuotaResultSize || context.getResponseBlockSize() + context.getResponseExceptionSize() > maxQuotaResultSize)) {
                // We're storing the exception since the exception and reason string won't
                // change after the response size limit is reached.
                if (sizeIOE == null) {
                    // We don't need the stack un-winding so don't throw the exception.
                    // Throwing will kill the JVM's JIT.
                    //
                    // Instead just create the exception and then store it.
                    sizeIOE = new MultiActionResultTooLarge("Max size exceeded" + " CellSize: " + context.getResponseCellSize() + " BlockSize: " + context.getResponseBlockSize());
                    // Only report the exception once since there's only one request that
                    // caused the exception. Otherwise this number will dominate the exceptions count.
                    rpcServer.getMetrics().exception(sizeIOE);
                }
                // Now that the exception is known to have been created,
                // use it for the response.
                //
                // This will create a copy in the builder.
                hasResultOrException = true;
                NameBytesPair pair = ResponseConverter.buildException(sizeIOE);
                resultOrExceptionBuilder.setException(pair);
                context.incrementResponseExceptionSize(pair.getSerializedSize());
                resultOrExceptionBuilder.setIndex(action.getIndex());
                builder.addResultOrException(resultOrExceptionBuilder.build());
                if (cellScanner != null) {
                    skipCellsForMutation(action, cellScanner);
                }
                continue;
            }
            if (action.hasGet()) {
                long before = EnvironmentEdgeManager.currentTime();
                try {
                    Get get = ProtobufUtil.toGet(action.getGet());
                    if (context != null) {
                        r = get(get, ((HRegion) region), closeCallBack, context);
                    } else {
                        r = region.get(get);
                    }
                } finally {
                    if (regionServer.metricsRegionServer != null) {
                        regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before);
                    }
                }
            } else if (action.hasServiceCall()) {
                hasResultOrException = true;
                try {
                    com.google.protobuf.Message result = execServiceOnRegion(region, action.getServiceCall());
                    ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder = ClientProtos.CoprocessorServiceResult.newBuilder();
                    resultOrExceptionBuilder.setServiceResult(serviceResultBuilder.setValue(serviceResultBuilder.getValueBuilder().setName(result.getClass().getName()).setValue(UnsafeByteOperations.unsafeWrap(result.toByteArray()))));
                } catch (IOException ioe) {
                    rpcServer.getMetrics().exception(ioe);
                    NameBytesPair pair = ResponseConverter.buildException(ioe);
                    resultOrExceptionBuilder.setException(pair);
                    context.incrementResponseExceptionSize(pair.getSerializedSize());
                }
            } else if (action.hasMutation()) {
                MutationType type = action.getMutation().getMutateType();
                if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null && !mutations.isEmpty()) {
                    // Flush out any Puts or Deletes already collected.
                    doBatchOp(builder, region, quota, mutations, cellScanner);
                    mutations.clear();
                }
                switch(type) {
                    case APPEND:
                        r = append(region, quota, action.getMutation(), cellScanner, nonceGroup);
                        break;
                    case INCREMENT:
                        r = increment(region, quota, action.getMutation(), cellScanner, nonceGroup);
                        break;
                    case PUT:
                    case DELETE:
                        // Collect the individual mutations and apply in a batch
                        if (mutations == null) {
                            mutations = new ArrayList<>(actions.getActionCount());
                        }
                        mutations.add(action);
                        break;
                    default:
                        throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
                }
            } else {
                throw new HBaseIOException("Unexpected Action type");
            }
            if (r != null) {
                ClientProtos.Result pbResult = null;
                if (isClientCellBlockSupport(context)) {
                    pbResult = ProtobufUtil.toResultNoData(r);
                    //  Hard to guess the size here.  Just make a rough guess.
                    if (cellsToReturn == null) {
                        cellsToReturn = new ArrayList<>();
                    }
                    cellsToReturn.add(r);
                } else {
                    pbResult = ProtobufUtil.toResult(r);
                }
                lastBlock = addSize(context, r, lastBlock);
                hasResultOrException = true;
                resultOrExceptionBuilder.setResult(pbResult);
            }
        // Could get to here and there was no result and no exception.  Presumes we added
        // a Put or Delete to the collecting Mutations List for adding later.  In this
        // case the corresponding ResultOrException instance for the Put or Delete will be added
        // down in the doBatchOp method call rather than up here.
        } catch (IOException ie) {
            rpcServer.getMetrics().exception(ie);
            hasResultOrException = true;
            NameBytesPair pair = ResponseConverter.buildException(ie);
            resultOrExceptionBuilder.setException(pair);
            context.incrementResponseExceptionSize(pair.getSerializedSize());
        }
        if (hasResultOrException) {
            // Propagate index.
            resultOrExceptionBuilder.setIndex(action.getIndex());
            builder.addResultOrException(resultOrExceptionBuilder.build());
        }
    }
    // Finish up any outstanding mutations
    if (mutations != null && !mutations.isEmpty()) {
        doBatchOp(builder, region, quota, mutations, cellScanner);
    }
    return cellsToReturn;
}
Also used : MultiActionResultTooLarge(org.apache.hadoop.hbase.MultiActionResultTooLarge) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) Message(org.apache.hadoop.hbase.shaded.com.google.protobuf.Message) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair) Get(org.apache.hadoop.hbase.client.Get) MutableObject(org.apache.commons.lang.mutable.MutableObject) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
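
For context, here is a minimal client-side sketch (not part of the HBase source) of the kind of protobuf payload doNonAtomicRegionMutation consumes: one RegionAction carrying a Get Action and a Put Action. The region name, rows, family and values are invented, and the ProtobufUtil/RequestConverter helper calls are assumed to match this HBase version.

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionActionSketch {

    // Builds a RegionAction with two indexed Actions: index 0 is a Get (the
    // action.hasGet() branch above), index 1 is a Put (collected into the
    // contiguous-mutations list and applied through doBatchOp).
    static ClientProtos.RegionAction buildMixedRegionAction(byte[] regionName) throws IOException {
        ClientProtos.RegionAction.Builder regionAction = ClientProtos.RegionAction.newBuilder()
            .setRegion(RequestConverter.buildRegionSpecifier(RegionSpecifierType.REGION_NAME, regionName));

        Get get = new Get(Bytes.toBytes("row-1"));
        regionAction.addAction(ClientProtos.Action.newBuilder()
            .setIndex(0)
            .setGet(ProtobufUtil.toGet(get)));

        Put put = new Put(Bytes.toBytes("row-2"));
        put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        regionAction.addAction(ClientProtos.Action.newBuilder()
            .setIndex(1)
            .setMutation(ProtobufUtil.toMutation(MutationType.PUT, put)));

        return regionAction.build();
    }
}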

Example 2 with Action

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action in project hbase by apache.

In class RSRpcServices, method checkAndRowMutate:

/**
   * Atomically check a single cell value and, if the check passes, apply the given list of
   * Put/Delete mutations to that row.
   *
   * @param region the Region to mutate
   * @param actions the Put/Delete Actions making up the row mutation
   * @param cellScanner if non-null, the mutation data -- the Cell content.
   * @param row the row the check is performed against
   * @param family the column family of the checked cell
   * @param qualifier the column qualifier of the checked cell
   * @param compareOp the comparison to apply
   * @param comparator the comparator carrying the expected value
   * @param builder accumulates an empty ResultOrException per operation
   * @return true if the check passed and the mutations were applied
   * @throws IOException
   */
private boolean checkAndRowMutate(final Region region, final List<ClientProtos.Action> actions, final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator, RegionActionResult.Builder builder) throws IOException {
    if (!region.getRegionInfo().isMetaTable()) {
        regionServer.cacheFlusher.reclaimMemStoreMemory();
    }
    RowMutations rm = null;
    int i = 0;
    ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder = ClientProtos.ResultOrException.newBuilder();
    for (ClientProtos.Action action : actions) {
        if (action.hasGet()) {
            throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
        }
        MutationType type = action.getMutation().getMutateType();
        if (rm == null) {
            rm = new RowMutations(action.getMutation().getRow().toByteArray(), actions.size());
        }
        switch(type) {
            case PUT:
                rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
                break;
            case DELETE:
                rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
                break;
            default:
                throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
        }
        // To unify the response format with doNonAtomicRegionMutation and allow it to be read
        // through the client's AsyncProcess, we have to add an empty Result instance per operation.
        resultOrExceptionOrBuilder.clear();
        resultOrExceptionOrBuilder.setIndex(i++);
        builder.addResultOrException(resultOrExceptionOrBuilder.build());
    }
    return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm, Boolean.TRUE);
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) RowMutations(org.apache.hadoop.hbase.client.RowMutations)
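
The checkAndRowMutate path above is what the classic CompareOp-based Table.checkAndMutate client call lands on. The following sketch is hypothetical (table, rows and values are invented) and assumes the pre-2.x-style Table API matching the CompareOp/ByteArrayComparable signature used here: if cf:status equals "pending", a Put and a Delete are applied to the row atomically.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndRowMutateSketch {

    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("demo"))) {  // hypothetical table name
            byte[] row = Bytes.toBytes("order-42");
            byte[] cf = Bytes.toBytes("cf");

            // The Put and Delete become the Put/Delete Actions iterated over in checkAndRowMutate.
            RowMutations rm = new RowMutations(row);
            rm.add(new Put(row).addColumn(cf, Bytes.toBytes("status"), Bytes.toBytes("shipped")));
            rm.add(new Delete(row).addColumns(cf, Bytes.toBytes("pending_since")));

            // The row/family/qualifier/CompareOp/value arguments become the check performed
            // by region.checkAndRowMutate on the server.
            boolean applied = table.checkAndMutate(row, cf, Bytes.toBytes("status"),
                CompareOp.EQUAL, Bytes.toBytes("pending"), rm);
            System.out.println("mutations applied: " + applied);
        }
    }
}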

Example 3 with Action

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action in project hbase by apache.

In class RSRpcServices, method mutateRows:

/**
   * Atomically apply a list of Put/Delete mutations to a single row.
   *
   * @param region the Region to mutate
   * @param actions the Put/Delete Actions, all against the same row
   * @param cellScanner if non-null, the mutation data -- the Cell content.
   * @param builder accumulates an empty ResultOrException per operation
   * @throws IOException
   */
private void mutateRows(final Region region, final List<ClientProtos.Action> actions, final CellScanner cellScanner, RegionActionResult.Builder builder) throws IOException {
    if (!region.getRegionInfo().isMetaTable()) {
        regionServer.cacheFlusher.reclaimMemStoreMemory();
    }
    RowMutations rm = null;
    int i = 0;
    ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder = ClientProtos.ResultOrException.newBuilder();
    for (ClientProtos.Action action : actions) {
        if (action.hasGet()) {
            throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
        }
        MutationType type = action.getMutation().getMutateType();
        if (rm == null) {
            rm = new RowMutations(action.getMutation().getRow().toByteArray(), actions.size());
        }
        switch(type) {
            case PUT:
                rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
                break;
            case DELETE:
                rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
                break;
            default:
                throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
        }
        // To unify the response format with doNonAtomicRegionMutation and allow it to be read
        // through the client's AsyncProcess, we have to add an empty Result instance per operation.
        resultOrExceptionOrBuilder.clear();
        resultOrExceptionOrBuilder.setIndex(i++);
        builder.addResultOrException(resultOrExceptionOrBuilder.build());
    }
    region.mutateRow(rm);
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) RowMutations(org.apache.hadoop.hbase.client.RowMutations)
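
mutateRows is the unconditional counterpart: the client builds a RowMutations and calls Table.mutateRow, and the server rebuilds the RowMutations from the Put/Delete Actions as above. A short hypothetical sketch, with invented names and a Table obtained as in the previous example:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateRowSketch {

    // Moves a value from one column to another on a single row in one atomic step.
    static void moveColumn(Table table) throws IOException {
        byte[] row = Bytes.toBytes("order-42");
        byte[] cf = Bytes.toBytes("cf");

        RowMutations rm = new RowMutations(row);
        rm.add(new Put(row).addColumn(cf, Bytes.toBytes("new_col"), Bytes.toBytes("v")));
        rm.add(new Delete(row).addColumns(cf, Bytes.toBytes("old_col")));

        // Both mutations commit together; on the server they are turned back into the
        // RowMutations passed to region.mutateRow(rm) above.
        table.mutateRow(rm);
    }
}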

Example 4 with Action

use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action in project hbase by apache.

In class RSRpcServices, method doBatchOp:

/**
   * Execute a list of Put/Delete mutations as one batch.
   *
   * @param builder accumulates one ResultOrException per mutation
   * @param region the Region the mutations run against
   * @param quota the OperationQuota charged for the batch
   * @param mutations the Put/Delete Actions to apply
   * @param cells the Cell data backing the mutations, if any
   */
private void doBatchOp(final RegionActionResult.Builder builder, final Region region, final OperationQuota quota, final List<ClientProtos.Action> mutations, final CellScanner cells) {
    Mutation[] mArray = new Mutation[mutations.size()];
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        int i = 0;
        for (ClientProtos.Action action : mutations) {
            MutationProto m = action.getMutation();
            Mutation mutation;
            if (m.getMutateType() == MutationType.PUT) {
                mutation = ProtobufUtil.toPut(m, cells);
                batchContainsPuts = true;
            } else {
                mutation = ProtobufUtil.toDelete(m, cells);
                batchContainsDelete = true;
            }
            mArray[i++] = mutation;
            quota.addMutation(mutation);
        }
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
        OperationStatus[] codes = region.batchMutate(mArray, HConstants.NO_NONCE, HConstants.NO_NONCE);
        for (i = 0; i < codes.length; i++) {
            int index = mutations.get(i).getIndex();
            Exception e = null;
            switch(codes[i].getOperationStatusCode()) {
                case BAD_FAMILY:
                    e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SANITY_CHECK_FAILURE:
                    e = new FailedSanityCheckException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
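                // Note: placing the default label before the SUCCESS case is legal Java; SUCCESS
                // still matches its own case below, and default only catches the remaining codes.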
                default:
                    e = new DoNotRetryIOException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SUCCESS:
                    builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), index));
                    break;
            }
        }
    } catch (IOException ie) {
        for (int i = 0; i < mutations.size(); i++) {
            builder.addResultOrException(getResultOrException(ie, mutations.get(i).getIndex()));
        }
    }
    if (regionServer.metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        if (batchContainsPuts) {
            regionServer.metricsRegionServer.updatePut(after - before);
        }
        if (batchContainsDelete) {
            regionServer.metricsRegionServer.updateDelete(after - before);
        }
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) FileNotFoundException(java.io.FileNotFoundException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) KeeperException(org.apache.zookeeper.KeeperException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
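
doBatchOp keys every ResultOrException on the original Action index, which is what lets the client place each outcome in the right slot. A hypothetical client-side batch illustrating that alignment (names invented, Table obtained as in the earlier sketches):

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchOpSketch {

    static void runBatch(Table table) throws Exception {
        byte[] cf = Bytes.toBytes("cf");
        List<Row> operations = new ArrayList<>();
        operations.add(new Put(Bytes.toBytes("row-1")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v")));
        operations.add(new Delete(Bytes.toBytes("row-2")));

        // results[i] corresponds to operations.get(i): an empty Result on success or an
        // exception, mirroring the per-index ResultOrException built in doBatchOp.
        Object[] results = new Object[operations.size()];
        table.batch(operations, results);
    }
}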

Aggregations

DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 4
ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) 4
Action (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) 4
ResultOrException (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) 4
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) 3
IOException (java.io.IOException) 2
InterruptedIOException (java.io.InterruptedIOException) 2
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException) 2
RowMutations (org.apache.hadoop.hbase.client.RowMutations) 2
FileNotFoundException (java.io.FileNotFoundException) 1
BindException (java.net.BindException) 1
UnknownHostException (java.net.UnknownHostException) 1
MutableObject (org.apache.commons.lang.mutable.MutableObject) 1
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException) 1
MultiActionResultTooLarge (org.apache.hadoop.hbase.MultiActionResultTooLarge) 1
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException) 1
UnknownScannerException (org.apache.hadoop.hbase.UnknownScannerException) 1
Get (org.apache.hadoop.hbase.client.Get) 1
Mutation (org.apache.hadoop.hbase.client.Mutation) 1
Result (org.apache.hadoop.hbase.client.Result) 1