
Example 31 with HBaseRpcController

Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.

From the class RSRpcServices, method mutate:

/**
   * Mutate data in a table.
   *
   * @param rpcc the RPC controller
   * @param request the mutate request
   * @return the mutate response
   * @throws ServiceException wrapping any IOException thrown while mutating
   */
@Override
public MutateResponse mutate(final RpcController rpcc, final MutateRequest request) throws ServiceException {
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    OperationQuota quota = null;
    RpcCallContext context = RpcServer.getCurrentCall();
    // Clear scanner so we are not holding on to reference across call.
    if (controller != null) {
        controller.setCellScanner(null);
    }
    try {
        checkOpen();
        requestCount.increment();
        rpcMutateRequestCount.increment();
        Region region = getRegion(request.getRegion());
        MutateResponse.Builder builder = MutateResponse.newBuilder();
        MutationProto mutation = request.getMutation();
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
        long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
        Result r = null;
        Boolean processed = null;
        MutationType type = mutation.getMutateType();
        quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
        switch(type) {
            case APPEND:
                // TODO: this doesn't actually check anything.
                r = append(region, quota, mutation, cellScanner, nonceGroup);
                break;
            case INCREMENT:
                // TODO: this doesn't actually check anything.
                r = increment(region, quota, mutation, cellScanner, nonceGroup);
                break;
            case PUT:
                Put put = ProtobufUtil.toPut(mutation, cellScanner);
                quota.addMutation(put);
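                // Conditional put (checkAndPut): give coprocessors a chance to handle the check
                // first; only fall through to the region's atomic checkAndMutate if they do not.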
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    if (region.getCoprocessorHost() != null) {
                        processed = region.getCoprocessorHost().preCheckAndPut(row, family, qualifier, compareOp, comparator, put);
                    }
                    if (processed == null) {
                        boolean result = region.checkAndMutate(row, family, qualifier, compareOp, comparator, put, true);
                        if (region.getCoprocessorHost() != null) {
                            result = region.getCoprocessorHost().postCheckAndPut(row, family, qualifier, compareOp, comparator, put, result);
                        }
                        processed = result;
                    }
                } else {
                    region.put(put);
                    processed = Boolean.TRUE;
                }
                break;
            case DELETE:
                Delete delete = ProtobufUtil.toDelete(mutation, cellScanner);
                quota.addMutation(delete);
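                // Conditional delete (checkAndDelete), mirroring the checkAndPut handling above.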
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    if (region.getCoprocessorHost() != null) {
                        processed = region.getCoprocessorHost().preCheckAndDelete(row, family, qualifier, compareOp, comparator, delete);
                    }
                    if (processed == null) {
                        boolean result = region.checkAndMutate(row, family, qualifier, compareOp, comparator, delete, true);
                        if (region.getCoprocessorHost() != null) {
                            result = region.getCoprocessorHost().postCheckAndDelete(row, family, qualifier, compareOp, comparator, delete, result);
                        }
                        processed = result;
                    }
                } else {
                    region.delete(delete);
                    processed = Boolean.TRUE;
                }
                break;
            default:
                throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
        }
        if (processed != null) {
            builder.setProcessed(processed.booleanValue());
        }
        boolean clientCellBlockSupported = isClientCellBlockSupport(context);
        addResult(builder, r, controller, clientCellBlockSupported);
        if (clientCellBlockSupported) {
            addSize(context, r, null);
        }
        return builder.build();
    } catch (IOException ie) {
        regionServer.checkFileSystem();
        throw new ServiceException(ie);
    } finally {
        if (quota != null) {
            quota.close();
        }
    }
}
Also used: RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext), Condition (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition), Delete (org.apache.hadoop.hbase.client.Delete), MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), HBaseIOException (org.apache.hadoop.hbase.HBaseIOException), CellScanner (org.apache.hadoop.hbase.CellScanner), MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto), Put (org.apache.hadoop.hbase.client.Put), RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult), Result (org.apache.hadoop.hbase.client.Result), MutateResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse), HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController), ByteArrayComparable (org.apache.hadoop.hbase.filter.ByteArrayComparable), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), CompareOp (org.apache.hadoop.hbase.filter.CompareFilter.CompareOp)
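
For context, the short sketch below is not HBase source; it illustrates the caller's side of the "back door" that mutate() reads from. The cells backing a mutation are attached to an HBaseRpcController so the RPC layer can ship them as a cell block instead of serializing them into the protobuf request, and the server recovers them via controller.cellScanner(). The class name MutateCellBlockSketch and its helper method are made up, and constructing the controller directly via HBaseRpcControllerImpl assumes that private implementation class is available; in HBase itself this wiring lives inside the client RPC internals rather than in application code.

import java.util.List;

import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.ipc.HBaseRpcControllerImpl;

public class MutateCellBlockSketch {

    /**
     * Wraps the cells that back a mutation into a controller so the RPC layer
     * can send them as a cell block; RSRpcServices.mutate (above) reads them
     * back with controller.cellScanner() and then clears the reference.
     */
    static HBaseRpcController controllerFor(List<Cell> cells) {
        HBaseRpcController controller = new HBaseRpcControllerImpl();
        controller.setCellScanner(CellUtil.createCellScanner(cells));
        return controller;
    }
}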

Example 32 with HBaseRpcController

Use of org.apache.hadoop.hbase.ipc.HBaseRpcController in project hbase by apache.

From the class RSRpcServices, method get:

/**
   * Get data from a table.
   *
   * @param controller the RPC controller
   * @param request the get request
   * @return the get response
   * @throws ServiceException wrapping any IOException thrown while reading
   */
@Override
public GetResponse get(final RpcController controller, final GetRequest request) throws ServiceException {
    long before = EnvironmentEdgeManager.currentTime();
    OperationQuota quota = null;
    try {
        checkOpen();
        requestCount.increment();
        rpcGetRequestCount.increment();
        Region region = getRegion(request.getRegion());
        GetResponse.Builder builder = GetResponse.newBuilder();
        ClientProtos.Get get = request.getGet();
        Boolean existence = null;
        Result r = null;
        RpcCallContext context = RpcServer.getCurrentCall();
        quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.GET);
        Get clientGet = ProtobufUtil.toGet(get);
        if (get.getExistenceOnly() && region.getCoprocessorHost() != null) {
            existence = region.getCoprocessorHost().preExists(clientGet);
        }
        if (existence == null) {
            if (context != null) {
                r = get(clientGet, ((HRegion) region), null, context);
            } else {
                // for test purposes
                r = region.get(clientGet);
            }
            if (get.getExistenceOnly()) {
                boolean exists = r.getExists();
                if (region.getCoprocessorHost() != null) {
                    exists = region.getCoprocessorHost().postExists(clientGet, exists);
                }
                existence = exists;
            }
        }
        if (existence != null) {
            ClientProtos.Result pbr = ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0);
            builder.setResult(pbr);
        } else if (r != null) {
            ClientProtos.Result pbr;
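            // Cell-block path: clients at 1.3+ that support cell blocks receive the cells via the
            // controller's CellScanner, and the protobuf Result carries only the associated cell count.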
            if (isClientCellBlockSupport(context) && controller instanceof HBaseRpcController && VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 3)) {
                pbr = ProtobufUtil.toResultNoData(r);
                ((HBaseRpcController) controller).setCellScanner(CellUtil.createCellScanner(r.rawCells()));
                addSize(context, r, null);
            } else {
                pbr = ProtobufUtil.toResult(r);
            }
            builder.setResult(pbr);
        }
        if (r != null) {
            quota.addGetResult(r);
        }
        return builder.build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    } finally {
        if (regionServer.metricsRegionServer != null) {
            regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before);
        }
        if (quota != null) {
            quota.close();
        }
    }
}
Also used: RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext), OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota), InterruptedIOException (java.io.InterruptedIOException), IOException (java.io.IOException), DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException), HBaseIOException (org.apache.hadoop.hbase.HBaseIOException), GetResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse), RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult), Result (org.apache.hadoop.hbase.client.Result), HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController), ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException), Get (org.apache.hadoop.hbase.client.Get), ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
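
As a counterpart to the cell-block branch above, here is a hedged client-side sketch, not HBase source: GetCellBlockSketch and its toResult helper are hypothetical names. When the server answers with a data-less protobuf Result plus a cell block, the caller rebuilds the full Result from the GetResponse and the controller's CellScanner; when the server took the plain protobuf path, the scanner is simply absent.

import java.io.IOException;

import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.ipc.HBaseRpcController;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.GetResponse;

public class GetCellBlockSketch {

    /**
     * Rebuilds a client Result from a GetResponse. When the server took the
     * cell-block path, the cells arrive through the controller's CellScanner;
     * otherwise the protobuf Result already contains them and the scanner is null.
     */
    static Result toResult(GetResponse response, HBaseRpcController controller) throws IOException {
        if (!response.hasResult()) {
            return null;
        }
        // Null when the server used the pure-protobuf path (ProtobufUtil.toResult(r) above).
        CellScanner cells = controller == null ? null : controller.cellScanner();
        return ProtobufUtil.toResult(response.getResult(), cells);
    }
}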

Aggregations

HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController): 32
IOException (java.io.IOException): 19
AdminService (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService): 16
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 12
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 10
InterruptedIOException (java.io.InterruptedIOException): 9
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 6
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException): 6
ArrayList (java.util.ArrayList): 5
CellScanner (org.apache.hadoop.hbase.CellScanner): 5
ServerName (org.apache.hadoop.hbase.ServerName): 5
Result (org.apache.hadoop.hbase.client.Result): 4
RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext): 4
OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota): 4
RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult): 4
RemoteException (org.apache.hadoop.ipc.RemoteException): 4
Callable (java.util.concurrent.Callable): 3
CellScannable (org.apache.hadoop.hbase.CellScannable): 3
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 3
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2