
Example 11 with MutationProto

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

From the class RequestConverter, method buildNoDataRegionAction.

/**
   * Create a protocol buffer MultiRequest for row mutations that does not hold data. Data/Cells
   * are carried outside of protobuf. Returns references to the Cells in the <code>cells</code> param.
   * Does not propagate Action absolute position. Does not set the atomic flag on the created
   * RegionAction; the caller should do that if wanted.
   * @param regionName the name of the target region
   * @param rowMutations the row mutations to convert
   * @param cells Return in here a list of Cells as CellIterable.
   * @return a region mutation minus data
   * @throws IOException if a mutation other than a Put or Delete is encountered
   */
public static RegionAction.Builder buildNoDataRegionAction(final byte[] regionName,
        final RowMutations rowMutations, final List<CellScannable> cells,
        final RegionAction.Builder regionActionBuilder, final ClientProtos.Action.Builder actionBuilder,
        final MutationProto.Builder mutationBuilder) throws IOException {
    for (Mutation mutation : rowMutations.getMutations()) {
        MutationType type = null;
        if (mutation instanceof Put) {
            type = MutationType.PUT;
        } else if (mutation instanceof Delete) {
            type = MutationType.DELETE;
        } else {
            throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + mutation.getClass().getName());
        }
        mutationBuilder.clear();
        MutationProto mp = ProtobufUtil.toMutationNoData(type, mutation, mutationBuilder);
        cells.add(mutation);
        actionBuilder.clear();
        regionActionBuilder.addAction(actionBuilder.setMutation(mp).build());
    }
    return regionActionBuilder;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Mutation(org.apache.hadoop.hbase.client.Mutation) Put(org.apache.hadoop.hbase.client.Put) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto)
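
For orientation, here is a minimal caller-side sketch of this method. The class name, region name, and column family are hypothetical, and the region lookup and RPC wiring a real client would do are omitted; the point is that the mutations themselves act as the CellScannables returned in the cells list.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
import org.apache.hadoop.hbase.util.Bytes;

public class NoDataRegionActionSketch {

    public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        // Hypothetical region name; a real one comes from a RegionLocator lookup.
        byte[] regionName = Bytes.toBytes("some-region");

        RowMutations rm = new RowMutations(row);
        rm.add(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        rm.add(new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("stale")));

        // The mutations themselves are the CellScannables; their cells travel outside protobuf.
        List<CellScannable> cells = new ArrayList<>();
        RegionAction.Builder regionAction = RequestConverter.buildNoDataRegionAction(
            regionName, rm, cells,
            RegionAction.newBuilder(), ClientProtos.Action.newBuilder(), MutationProto.newBuilder());

        System.out.println("actions=" + regionAction.getActionCount() + ", cell carriers=" + cells.size());
    }
}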

Example 12 with MutationProto

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

From the class RequestConverter, method buildRegionAction.

/**
   * Create a protocol buffer MultiRequest for row mutations.
   * Does not propagate Action absolute position. Does not set the atomic flag on the created
   * RegionAction; the caller should do that if wanted.
   * @param regionName the name of the target region
   * @param rowMutations the row mutations to convert
   * @return a data-laden RegionAction.Builder
   * @throws IOException if a mutation other than a Put or Delete is encountered
   */
public static RegionAction.Builder buildRegionAction(final byte[] regionName, final RowMutations rowMutations) throws IOException {
    RegionAction.Builder builder = getRegionActionBuilderWithRegion(RegionAction.newBuilder(), regionName);
    ClientProtos.Action.Builder actionBuilder = ClientProtos.Action.newBuilder();
    MutationProto.Builder mutationBuilder = MutationProto.newBuilder();
    for (Mutation mutation : rowMutations.getMutations()) {
        MutationType mutateType = null;
        if (mutation instanceof Put) {
            mutateType = MutationType.PUT;
        } else if (mutation instanceof Delete) {
            mutateType = MutationType.DELETE;
        } else {
            throw new DoNotRetryIOException("RowMutations supports only put and delete, not " + mutation.getClass().getName());
        }
        mutationBuilder.clear();
        MutationProto mp = ProtobufUtil.toMutation(mutateType, mutation, mutationBuilder);
        actionBuilder.clear();
        actionBuilder.setMutation(mp);
        builder.addAction(actionBuilder.build());
    }
    return builder;
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) Action(org.apache.hadoop.hbase.client.Action) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Mutation(org.apache.hadoop.hbase.client.Mutation) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) Put(org.apache.hadoop.hbase.client.Put)
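
A similar hedged sketch for this data-laden variant, showing the caller setting the atomic flag itself as the Javadoc asks. The region name is a placeholder and the MultiRequest wrapping is illustrative only.

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction;
import org.apache.hadoop.hbase.util.Bytes;

public class RegionActionSketch {

    public static void main(String[] args) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] regionName = Bytes.toBytes("some-region"); // placeholder

        RowMutations rm = new RowMutations(row);
        rm.add(new Put(row).addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v")));
        rm.add(new Delete(row).addColumns(Bytes.toBytes("cf"), Bytes.toBytes("stale")));

        // Data-laden variant: cells are serialized inside the protobuf message itself.
        RegionAction.Builder regionAction = RequestConverter.buildRegionAction(regionName, rm);
        // The Javadoc leaves the atomic flag to the caller.
        regionAction.setAtomic(true);

        MultiRequest multi = MultiRequest.newBuilder().addRegionAction(regionAction.build()).build();
        System.out.println("region actions in MultiRequest: " + multi.getRegionActionCount());
    }
}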

Example 13 with MutationProto

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

From the class MultiRowMutationEndpoint, method mutateRows.

@Override
public void mutateRows(RpcController controller, MutateRowsRequest request, RpcCallback<MutateRowsResponse> done) {
    boolean matches = true;
    List<Region.RowLock> rowLocks = null;
    try {
        // set of rows to lock, sorted to avoid deadlocks
        SortedSet<byte[]> rowsToLock = new TreeSet<>(Bytes.BYTES_COMPARATOR);
        List<MutationProto> mutateRequestList = request.getMutationRequestList();
        List<Mutation> mutations = new ArrayList<>(mutateRequestList.size());
        for (MutationProto m : mutateRequestList) {
            mutations.add(ProtobufUtil.toMutation(m));
        }
        Region region = env.getRegion();
        RegionInfo regionInfo = region.getRegionInfo();
        for (Mutation m : mutations) {
            // check whether rows are in range for this region
            if (!HRegion.rowIsInRange(regionInfo, m.getRow())) {
                String msg = "Requested row out of range '" + Bytes.toStringBinary(m.getRow()) + "'";
                if (rowsToLock.isEmpty()) {
                    // allow client to retry
                    throw new WrongRegionException(msg);
                } else {
                    // rows are split between regions, do not retry
                    throw new org.apache.hadoop.hbase.DoNotRetryIOException(msg);
                }
            }
            rowsToLock.add(m.getRow());
        }
        if (request.getConditionCount() > 0) {
            // Get row locks for the mutations and the conditions
            rowLocks = new ArrayList<>();
            for (ClientProtos.Condition condition : request.getConditionList()) {
                rowsToLock.add(condition.getRow().toByteArray());
            }
            for (byte[] row : rowsToLock) {
                try {
                    // write lock
                    Region.RowLock rowLock = region.getRowLock(row, false);
                    rowLocks.add(rowLock);
                } catch (IOException ioe) {
                    LOGGER.warn("Failed getting lock, row={}, in region {}", Bytes.toStringBinary(row), this, ioe);
                    throw ioe;
                }
            }
            // Check if all the conditions match
            for (ClientProtos.Condition condition : request.getConditionList()) {
                if (!matches(region, condition)) {
                    matches = false;
                    break;
                }
            }
        }
        if (matches) {
            // call utility method on region
            long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
            long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
            region.mutateRowsWithLocks(mutations, rowsToLock, nonceGroup, nonce);
        }
    } catch (IOException e) {
        CoprocessorRpcUtils.setControllerException(controller, e);
    } finally {
        if (rowLocks != null) {
            // Release the acquired row locks
            for (Region.RowLock rowLock : rowLocks) {
                rowLock.release();
            }
        }
    }
    done.run(MutateRowsResponse.newBuilder().setProcessed(matches).build());
}
Also used : ArrayList(java.util.ArrayList) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) WrongRegionException(org.apache.hadoop.hbase.regionserver.WrongRegionException) IOException(java.io.IOException) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) TreeSet(java.util.TreeSet) HRegion(org.apache.hadoop.hbase.regionserver.HRegion) Region(org.apache.hadoop.hbase.regionserver.Region) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
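
Client-side, this endpoint is fed a MutateRowsRequest of MutationProtos. The sketch below is a rough illustration that assumes a MultiRowMutationService.BlockingInterface stub has already been obtained (typically over a coprocessor RPC channel; the exact wiring varies by HBase version) and that both rows live in the same region; row keys and the column family are hypothetical.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse;
import org.apache.hadoop.hbase.util.Bytes;

public class MultiRowMutationSketch {

    /** Atomically apply two puts through an already-wired blocking stub. */
    static boolean putTwoRows(MultiRowMutationService.BlockingInterface service) throws Exception {
        byte[] cf = Bytes.toBytes("cf");
        Put putA = new Put(Bytes.toBytes("rowA")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("a"));
        Put putB = new Put(Bytes.toBytes("rowB")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("b"));

        MutateRowsRequest request = MutateRowsRequest.newBuilder()
            .addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, putA))
            .addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, putB))
            .build();

        // Server side, this lands in MultiRowMutationEndpoint.mutateRows() above.
        MutateRowsResponse response = service.mutateRows(null, request);
        return response.getProcessed();
    }
}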

Example 14 with MutationProto

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

From the class RSRpcServices, method mutate.

/**
 * Mutate data in a table.
 *
 * @param rpcc the RPC controller
 * @param request the mutate request
 * @return the mutate response
 */
@Override
public MutateResponse mutate(final RpcController rpcc, final MutateRequest request) throws ServiceException {
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    OperationQuota quota = null;
    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
    // Clear scanner so we are not holding on to reference across call.
    if (controller != null) {
        controller.setCellScanner(null);
    }
    try {
        checkOpen();
        requestCount.increment();
        rpcMutateRequestCount.increment();
        HRegion region = getRegion(request.getRegion());
        rejectIfInStandByState(region);
        MutateResponse.Builder builder = MutateResponse.newBuilder();
        MutationProto mutation = request.getMutation();
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
        quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.MUTATE);
        ActivePolicyEnforcement spaceQuotaEnforcement = getSpaceQuotaManager().getActiveEnforcements();
        if (request.hasCondition()) {
            CheckAndMutateResult result = checkAndMutate(region, quota, mutation, cellScanner, request.getCondition(), nonceGroup, spaceQuotaEnforcement);
            builder.setProcessed(result.isSuccess());
            boolean clientCellBlockSupported = isClientCellBlockSupport(context);
            addResult(builder, result.getResult(), controller, clientCellBlockSupported);
            if (clientCellBlockSupported) {
                addSize(context, result.getResult(), null);
            }
        } else {
            Result r = null;
            Boolean processed = null;
            MutationType type = mutation.getMutateType();
            switch(type) {
                case APPEND:
                    // TODO: this doesn't actually check anything.
                    r = append(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement);
                    break;
                case INCREMENT:
                    // TODO: this doesn't actually check anything.
                    r = increment(region, quota, mutation, cellScanner, nonceGroup, spaceQuotaEnforcement);
                    break;
                case PUT:
                    put(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
                    processed = Boolean.TRUE;
                    break;
                case DELETE:
                    delete(region, quota, mutation, cellScanner, spaceQuotaEnforcement);
                    processed = Boolean.TRUE;
                    break;
                default:
                    throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
            }
            if (processed != null) {
                builder.setProcessed(processed);
            }
            boolean clientCellBlockSupported = isClientCellBlockSupport(context);
            addResult(builder, r, controller, clientCellBlockSupported);
            if (clientCellBlockSupported) {
                addSize(context, r, null);
            }
        }
        return builder.build();
    } catch (IOException ie) {
        server.checkFileSystem();
        throw new ServiceException(ie);
    } finally {
        if (quota != null) {
            quota.close();
        }
    }
}
Also used : RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) ActivePolicyEnforcement(org.apache.hadoop.hbase.quotas.ActivePolicyEnforcement) MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) CellScanner(org.apache.hadoop.hbase.CellScanner) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) MutateResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutateResponse) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean)
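
The switch above dispatches purely on MutationProto.getMutateType(). Below is a small illustrative sketch of the client-side protos for the PUT and INCREMENT branches; the MutateRequest, region specifier, and cell-block plumbing are omitted, and all names and values are hypothetical.

import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class MutateTypeSketch {

    public static void main(String[] args) throws Exception {
        byte[] cf = Bytes.toBytes("cf");

        Put put = new Put(Bytes.toBytes("r")).addColumn(cf, Bytes.toBytes("q"), Bytes.toBytes("v"));
        MutationProto putProto = ProtobufUtil.toMutation(MutationType.PUT, put);

        Increment inc = new Increment(Bytes.toBytes("r")).addColumn(cf, Bytes.toBytes("ctr"), 1L);
        MutationProto incProto = ProtobufUtil.toMutation(MutationType.INCREMENT, inc);

        // RSRpcServices.mutate() switches on exactly this field.
        System.out.println(putProto.getMutateType());   // PUT
        System.out.println(incProto.getMutateType());   // INCREMENT
        // No nonce attached by this conversion; nonce handling matters for the Increment/Append branches.
        System.out.println(incProto.hasNonce());        // false
    }
}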

Example 15 with MutationProto

Use of org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto in project hbase by apache.

From the class RSRpcServices, method doBatchOp.

/**
 * Execute a list of mutations.
 *
 * @param builder the builder that collects a result or exception for each action
 * @param region the region the mutations are applied to
 * @param mutations the actions carrying the mutations to execute
 */
private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region,
        final OperationQuota quota, final List<ClientProtos.Action> mutations, final CellScanner cells,
        long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement, boolean atomic) throws IOException {
    Mutation[] mArray = new Mutation[mutations.size()];
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        /**
         * HBASE-17924
         * mutationActionMap maps each mutation back to its originating action. The mutation
         * array may have been reordered, so to return the right result or exception to the
         * corresponding action we need to know which action a mutation belongs to. We can't
         * sort the ClientProtos.Action array itself, since the actions are bound to cell scanners.
         */
        Map<Mutation, ClientProtos.Action> mutationActionMap = new HashMap<>();
        int i = 0;
        long nonce = HConstants.NO_NONCE;
        for (ClientProtos.Action action : mutations) {
            if (action.hasGet()) {
                throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
            }
            MutationProto m = action.getMutation();
            Mutation mutation;
            switch(m.getMutateType()) {
                case PUT:
                    mutation = ProtobufUtil.toPut(m, cells);
                    batchContainsPuts = true;
                    break;
                case DELETE:
                    mutation = ProtobufUtil.toDelete(m, cells);
                    batchContainsDelete = true;
                    break;
                case INCREMENT:
                    mutation = ProtobufUtil.toIncrement(m, cells);
                    nonce = m.hasNonce() ? m.getNonce() : HConstants.NO_NONCE;
                    break;
                case APPEND:
                    mutation = ProtobufUtil.toAppend(m, cells);
                    nonce = m.hasNonce() ? m.getNonce() : HConstants.NO_NONCE;
                    break;
                default:
                    throw new DoNotRetryIOException("Invalid mutation type : " + m.getMutateType());
            }
            mutationActionMap.put(mutation, action);
            mArray[i++] = mutation;
            checkCellSizeLimit(region, mutation);
            // Check if a space quota disallows this mutation
            spaceQuotaEnforcement.getPolicyEnforcement(region).check(mutation);
            quota.addMutation(mutation);
        }
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        // Sort to improve lock efficiency for the non-atomic case. If atomic, order is
        // preserved as expected by the client.
        if (!atomic) {
            Arrays.sort(mArray, (v1, v2) -> Row.COMPARATOR.compare(v1, v2));
        }
        OperationStatus[] codes = region.batchMutate(mArray, atomic, nonceGroup, nonce);
        // When atomic, merge any Increment/Append results and set the merged result to the
        // first element of the ResultOrException list.
        if (atomic) {
            List<ResultOrException> resultOrExceptions = new ArrayList<>();
            List<Result> results = new ArrayList<>();
            for (i = 0; i < codes.length; i++) {
                if (codes[i].getResult() != null) {
                    results.add(codes[i].getResult());
                }
                if (i != 0) {
                    resultOrExceptions.add(getResultOrException(ClientProtos.Result.getDefaultInstance(), i));
                }
            }
            if (results.isEmpty()) {
                builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), 0));
            } else {
                // Merge the results of the Increment/Append operations
                List<Cell> cellList = new ArrayList<>();
                for (Result result : results) {
                    if (result.rawCells() != null) {
                        cellList.addAll(Arrays.asList(result.rawCells()));
                    }
                }
                Result result = Result.create(cellList);
                // Set the merged result of the Increment/Append operations to the first element of the
                // ResultOrException list
                builder.addResultOrException(getResultOrException(ProtobufUtil.toResult(result), 0));
            }
            builder.addAllResultOrException(resultOrExceptions);
            return;
        }
        for (i = 0; i < codes.length; i++) {
            Mutation currentMutation = mArray[i];
            ClientProtos.Action currentAction = mutationActionMap.get(currentMutation);
            int index = currentAction.hasIndex() ? currentAction.getIndex() : i;
            Exception e;
            switch(codes[i].getOperationStatusCode()) {
                case BAD_FAMILY:
                    e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SANITY_CHECK_FAILURE:
                    e = new FailedSanityCheckException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                default:
                    e = new DoNotRetryIOException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SUCCESS:
                    builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), index));
                    break;
                case STORE_TOO_BUSY:
                    e = new RegionTooBusyException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
            }
        }
    } finally {
        int processedMutationIndex = 0;
        for (Action mutation : mutations) {
            // The non-null mArray[i] means the cell scanner has been read.
            if (mArray[processedMutationIndex++] == null) {
                skipCellsForMutation(mutation, cells);
            }
        }
        updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
    }
}
Also used : RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) IOException(java.io.IOException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.LeaseManager.LeaseStillHeldException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) UnknownProtocolException(org.apache.hadoop.hbase.exceptions.UnknownProtocolException) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) UncheckedIOException(java.io.UncheckedIOException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) FileNotFoundException(java.io.FileNotFoundException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
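
For reference, a hypothetical construction of the List<ClientProtos.Action> this method consumes; the explicit index on each action is what lets the result loop above map results and exceptions back to the caller's positions even though mArray may be re-sorted. Row keys and the column family are placeholders.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchActionSketch {

    static List<ClientProtos.Action> buildActions() throws Exception {
        Put put = new Put(Bytes.toBytes("r1"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
        Delete delete = new Delete(Bytes.toBytes("r2"));

        List<ClientProtos.Action> actions = new ArrayList<>();
        // Data-laden protos here for simplicity; the no-data/cell-block variant pairs each
        // action with cells carried outside protobuf, as in Example 11.
        actions.add(ClientProtos.Action.newBuilder()
            .setIndex(0)
            .setMutation(ProtobufUtil.toMutation(MutationType.PUT, put))
            .build());
        actions.add(ClientProtos.Action.newBuilder()
            .setIndex(1)
            .setMutation(ProtobufUtil.toMutation(MutationType.DELETE, delete))
            .build());
        return actions;
    }
}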

Aggregations

MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto): 27
Test (org.junit.Test): 13
TableName (org.apache.hadoop.hbase.TableName): 10
Mutation (org.apache.hadoop.hbase.client.Mutation): 10
ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos): 10
IOException (java.io.IOException): 9
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 8
Delete (org.apache.hadoop.hbase.client.Delete): 8
Put (org.apache.hadoop.hbase.client.Put): 8
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 8
Append (org.apache.hadoop.hbase.client.Append): 7
Increment (org.apache.hadoop.hbase.client.Increment): 7
Cell (org.apache.hadoop.hbase.Cell): 6
CoprocessorRpcChannel (org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel): 6
ColumnValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue): 6
RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction): 6
MultiRowMutationService (org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService): 6
MutateRowsRequest (org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest): 6
MutateRowsResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.MultiRowMutationProtos.MutateRowsResponse): 6
QualifierValue (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.ColumnValue.QualifierValue): 4