
Example 1 with FailedSanityCheckException

Use of org.apache.hadoop.hbase.exceptions.FailedSanityCheckException in project hbase by apache.

Class RSRpcServices, method doBatchOp.

/**
   * Execute a list of Put/Delete mutations.
   *
   * @param builder the RegionActionResult builder to which per-action results or exceptions are added
   * @param region the region the mutations are applied to
   * @param mutations the client actions carrying the Put/Delete mutations to apply
   */
private void doBatchOp(final RegionActionResult.Builder builder, final Region region, final OperationQuota quota, final List<ClientProtos.Action> mutations, final CellScanner cells) {
    Mutation[] mArray = new Mutation[mutations.size()];
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        int i = 0;
        for (ClientProtos.Action action : mutations) {
            MutationProto m = action.getMutation();
            Mutation mutation;
            if (m.getMutateType() == MutationType.PUT) {
                mutation = ProtobufUtil.toPut(m, cells);
                batchContainsPuts = true;
            } else {
                mutation = ProtobufUtil.toDelete(m, cells);
                batchContainsDelete = true;
            }
            mArray[i++] = mutation;
            quota.addMutation(mutation);
        }
        if (!region.getRegionInfo().isMetaTable()) {
            regionServer.cacheFlusher.reclaimMemStoreMemory();
        }
        OperationStatus[] codes = region.batchMutate(mArray, HConstants.NO_NONCE, HConstants.NO_NONCE);
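        // batchMutate returns one OperationStatus per mutation, in the same order as mArray.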
        for (i = 0; i < codes.length; i++) {
            int index = mutations.get(i).getIndex();
            Exception e = null;
            switch(codes[i].getOperationStatusCode()) {
                case BAD_FAMILY:
                    e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SANITY_CHECK_FAILURE:
                    e = new FailedSanityCheckException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
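                // Note: the default label precedes the SUCCESS case; in Java the position of
                // default does not affect dispatch, so any unlisted status code maps to
                // DoNotRetryIOException.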
                default:
                    e = new DoNotRetryIOException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SUCCESS:
                    builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), index));
                    break;
            }
        }
    } catch (IOException ie) {
        for (int i = 0; i < mutations.size(); i++) {
            builder.addResultOrException(getResultOrException(ie, mutations.get(i).getIndex()));
        }
    }
    if (regionServer.metricsRegionServer != null) {
        long after = EnvironmentEdgeManager.currentTime();
        if (batchContainsPuts) {
            regionServer.metricsRegionServer.updatePut(after - before);
        }
        if (batchContainsDelete) {
            regionServer.metricsRegionServer.updateDelete(after - before);
        }
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) UnknownHostException(java.net.UnknownHostException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) FileNotFoundException(java.io.FileNotFoundException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) KeeperException(org.apache.zookeeper.KeeperException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
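
For context, the minimal sketch below shows how such a per-action failure might look from the client side: FailedSanityCheckException extends DoNotRetryIOException, so the affected action is not retried and the cause typically surfaces inside a RetriesExhaustedWithDetailsException. This is an illustrative assumption, not part of the snippet above; the table name, column family, and far-future timestamp are made up, and the sanity check only fires if the region's timestamp slop is actually configured.

// Hedged client-side sketch (illustrative names; assumes the timestamp-slop sanity
// check is enabled on the server, which it is not by default).
import java.io.IOException;
import java.util.Collections;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RetriesExhaustedWithDetailsException;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class BatchPutSanityCheckExample {
    public static void main(String[] args) throws IOException {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("demo_table"))) {
            // A timestamp one day in the future can trip checkTimestamps() on the
            // server, which doBatchOp then reports as SANITY_CHECK_FAILURE.
            Put put = new Put(Bytes.toBytes("row1"));
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"),
                    System.currentTimeMillis() + 24L * 60 * 60 * 1000, Bytes.toBytes("v"));
            try {
                table.put(Collections.singletonList(put));
            } catch (RetriesExhaustedWithDetailsException e) {
                // Each failed action carries its own cause; a FailedSanityCheckException
                // is not retried because it extends DoNotRetryIOException.
                for (int i = 0; i < e.getNumExceptions(); i++) {
                    System.out.println("action " + i + " failed: " + e.getCause(i));
                }
            }
        }
    }
}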

Example 2 with FailedSanityCheckException

Use of org.apache.hadoop.hbase.exceptions.FailedSanityCheckException in project hbase by apache.

Class HRegion, method checkBatchOp.

private boolean checkBatchOp(BatchOperation<?> batchOp, final int lastIndexExclusive, final Map<byte[], List<Cell>>[] familyMaps, final long now, final ObservedExceptionsInBatch observedExceptions) throws IOException {
    boolean skip = false;
    // Skip anything that "ran" already
    if (batchOp.retCodeDetails[lastIndexExclusive].getOperationStatusCode() != OperationStatusCode.NOT_RUN) {
        return true;
    }
    Mutation mutation = batchOp.getMutation(lastIndexExclusive);
    Map<byte[], List<Cell>> familyMap = mutation.getFamilyCellMap();
    // store the family map reference to allow for mutations
    familyMaps[lastIndexExclusive] = familyMap;
    try {
        checkAndPrepareMutation(mutation, batchOp.isInReplay(), familyMap, now);
    } catch (NoSuchColumnFamilyException nscf) {
        final String msg = "No such column family in batch mutation. ";
        if (observedExceptions.hasSeenNoSuchFamily()) {
            LOG.warn(msg + nscf.getMessage());
        } else {
            LOG.warn(msg, nscf);
            observedExceptions.sawNoSuchFamily();
        }
        batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(OperationStatusCode.BAD_FAMILY, nscf.getMessage());
        skip = true;
    } catch (FailedSanityCheckException fsce) {
        final String msg = "Batch Mutation did not pass sanity check. ";
        if (observedExceptions.hasSeenFailedSanityCheck()) {
            LOG.warn(msg + fsce.getMessage());
        } else {
            LOG.warn(msg, fsce);
            observedExceptions.sawFailedSanityCheck();
        }
        batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(OperationStatusCode.SANITY_CHECK_FAILURE, fsce.getMessage());
        skip = true;
    } catch (WrongRegionException we) {
        final String msg = "Batch mutation had a row that does not belong to this region. ";
        if (observedExceptions.hasSeenWrongRegion()) {
            LOG.warn(msg + we.getMessage());
        } else {
            LOG.warn(msg, we);
            observedExceptions.sawWrongRegion();
        }
        batchOp.retCodeDetails[lastIndexExclusive] = new OperationStatus(OperationStatusCode.SANITY_CHECK_FAILURE, we.getMessage());
        skip = true;
    }
    return skip;
}
Also used : ArrayList(java.util.ArrayList) AbstractList(java.util.AbstractList) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException)
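
The pattern worth noting here is the ObservedExceptionsInBatch bookkeeping: the first occurrence of each failure type in a batch is logged with a full stack trace, later occurrences only at message level, so one malformed batch cannot flood the region server log. Below is a minimal standalone sketch of that idea; the class name, field, and slf4j logger are illustrative assumptions, not HBase API.

// Hedged sketch of the "log the stack trace only once per batch" pattern used above.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class FirstOccurrenceLogger {
    private static final Logger LOG = LoggerFactory.getLogger(FirstOccurrenceLogger.class);
    private boolean seenSanityCheckFailure = false;

    public void warnSanityCheck(Exception e) {
        final String msg = "Batch mutation did not pass sanity check. ";
        if (seenSanityCheckFailure) {
            LOG.warn(msg + e.getMessage());   // repeats: message only
        } else {
            LOG.warn(msg, e);                 // first occurrence: full stack trace
            seenSanityCheckFailure = true;
        }
    }
}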

Example 3 with FailedSanityCheckException

Use of org.apache.hadoop.hbase.exceptions.FailedSanityCheckException in project hbase by apache.

Class HRegion, method checkTimestamps.

/**
 * Check the collection of families for valid timestamps
 * @param familyMap map from column family to the list of Cells to check
 * @param now current timestamp (server time, in milliseconds)
 * @throws FailedSanityCheckException if any cell's timestamp is more than timestampSlop ahead of now
 */
public void checkTimestamps(final Map<byte[], List<Cell>> familyMap, long now) throws FailedSanityCheckException {
    if (timestampSlop == HConstants.LATEST_TIMESTAMP) {
        return;
    }
    long maxTs = now + timestampSlop;
    for (List<Cell> kvs : familyMap.values()) {
        // HBASE-12023 HRegion.applyFamilyMapToMemstore creates too many iterator objects
        assert kvs instanceof RandomAccess;
        int listSize = kvs.size();
        for (int i = 0; i < listSize; i++) {
            Cell cell = kvs.get(i);
            // See if the user-side timestamp is out of range (LATEST_TIMESTAMP is a
            // sentinel replaced with server-side time, so it is exempt).
            long ts = cell.getTimestamp();
            if (ts != HConstants.LATEST_TIMESTAMP && ts > maxTs) {
                throw new FailedSanityCheckException("Timestamp for KV out of range " + cell + " (too.new=" + timestampSlop + ")");
            }
        }
    }
}
Also used : RandomAccess(java.util.RandomAccess) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException)
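
A self-contained sketch of the rule enforced here: a cell timestamp may run at most timestampSlop milliseconds ahead of the server clock, and the check is skipped entirely when the slop equals HConstants.LATEST_TIMESTAMP (the default, so the check is effectively off unless configured). The class name and the one-minute slop below are illustrative, not HBase defaults.

// Hedged, standalone restatement of the timestamp-slop check.
public class TimestampSlopCheck {
    static final long LATEST_TIMESTAMP = Long.MAX_VALUE; // mirrors HConstants.LATEST_TIMESTAMP

    static boolean withinSlop(long cellTs, long now, long timestampSlop) {
        if (timestampSlop == LATEST_TIMESTAMP) {
            return true; // check disabled
        }
        long maxTs = now + timestampSlop;
        return cellTs == LATEST_TIMESTAMP || cellTs <= maxTs;
    }

    public static void main(String[] args) {
        long now = System.currentTimeMillis();
        long slop = 60_000L; // hypothetical one-minute slop
        System.out.println(withinSlop(now + 30_000L, now, slop)); // true
        System.out.println(withinSlop(now + 90_000L, now, slop)); // false -> FailedSanityCheckException in HBase
    }
}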

Example 4 with FailedSanityCheckException

Use of org.apache.hadoop.hbase.exceptions.FailedSanityCheckException in project hbase by apache.

Class RSRpcServices, method doBatchOp.

/**
 * Execute a list of mutations.
 *
 * @param builder the RegionActionResult builder to which per-action results or exceptions are added
 * @param region the region the mutations are applied to
 * @param mutations the client actions carrying the mutations to apply
 */
private void doBatchOp(final RegionActionResult.Builder builder, final HRegion region, final OperationQuota quota, final List<ClientProtos.Action> mutations, final CellScanner cells, long nonceGroup, ActivePolicyEnforcement spaceQuotaEnforcement, boolean atomic) throws IOException {
    Mutation[] mArray = new Mutation[mutations.size()];
    long before = EnvironmentEdgeManager.currentTime();
    boolean batchContainsPuts = false, batchContainsDelete = false;
    try {
        /**
         * HBASE-17924
         * mutationActionMap maps each mutation back to its originating action, since
         * the mutation array may have been reordered. In order to return the right
         * result or exception to the corresponding action, we need to know which
         * action each mutation belongs to. We can't sort the ClientProtos.Action
         * array itself, since the actions are bound to their cell scanners.
         */
        Map<Mutation, ClientProtos.Action> mutationActionMap = new HashMap<>();
        int i = 0;
        long nonce = HConstants.NO_NONCE;
        for (ClientProtos.Action action : mutations) {
            if (action.hasGet()) {
                throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
            }
            MutationProto m = action.getMutation();
            Mutation mutation;
            switch(m.getMutateType()) {
                case PUT:
                    mutation = ProtobufUtil.toPut(m, cells);
                    batchContainsPuts = true;
                    break;
                case DELETE:
                    mutation = ProtobufUtil.toDelete(m, cells);
                    batchContainsDelete = true;
                    break;
                case INCREMENT:
                    mutation = ProtobufUtil.toIncrement(m, cells);
                    nonce = m.hasNonce() ? m.getNonce() : HConstants.NO_NONCE;
                    break;
                case APPEND:
                    mutation = ProtobufUtil.toAppend(m, cells);
                    nonce = m.hasNonce() ? m.getNonce() : HConstants.NO_NONCE;
                    break;
                default:
                    throw new DoNotRetryIOException("Invalid mutation type : " + m.getMutateType());
            }
            mutationActionMap.put(mutation, action);
            mArray[i++] = mutation;
            checkCellSizeLimit(region, mutation);
            // Check if a space quota disallows this mutation
            spaceQuotaEnforcement.getPolicyEnforcement(region).check(mutation);
            quota.addMutation(mutation);
        }
        if (!region.getRegionInfo().isMetaRegion()) {
            server.getMemStoreFlusher().reclaimMemStoreMemory();
        }
        // Sort to improve lock efficiency for a non-atomic batch; for an atomic batch,
        // the order is preserved as the client expects.
        if (!atomic) {
            Arrays.sort(mArray, (v1, v2) -> Row.COMPARATOR.compare(v1, v2));
        }
        OperationStatus[] codes = region.batchMutate(mArray, atomic, nonceGroup, nonce);
        // If atomic, merge the results of the Increment/Append operations and set the
        // merged result to the first element of the ResultOrException list.
        if (atomic) {
            List<ResultOrException> resultOrExceptions = new ArrayList<>();
            List<Result> results = new ArrayList<>();
            for (i = 0; i < codes.length; i++) {
                if (codes[i].getResult() != null) {
                    results.add(codes[i].getResult());
                }
                if (i != 0) {
                    resultOrExceptions.add(getResultOrException(ClientProtos.Result.getDefaultInstance(), i));
                }
            }
            if (results.isEmpty()) {
                builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), 0));
            } else {
                // Merge the results of the Increment/Append operations
                List<Cell> cellList = new ArrayList<>();
                for (Result result : results) {
                    if (result.rawCells() != null) {
                        cellList.addAll(Arrays.asList(result.rawCells()));
                    }
                }
                Result result = Result.create(cellList);
                // Set the merged result of the Increment/Append operations to the first element of the
                // ResultOrException list
                builder.addResultOrException(getResultOrException(ProtobufUtil.toResult(result), 0));
            }
            builder.addAllResultOrException(resultOrExceptions);
            return;
        }
        for (i = 0; i < codes.length; i++) {
            Mutation currentMutation = mArray[i];
            ClientProtos.Action currentAction = mutationActionMap.get(currentMutation);
            int index = currentAction.hasIndex() ? currentAction.getIndex() : i;
            Exception e;
            switch(codes[i].getOperationStatusCode()) {
                case BAD_FAMILY:
                    e = new NoSuchColumnFamilyException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SANITY_CHECK_FAILURE:
                    e = new FailedSanityCheckException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                default:
                    e = new DoNotRetryIOException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
                case SUCCESS:
                    builder.addResultOrException(getResultOrException(ClientProtos.Result.getDefaultInstance(), index));
                    break;
                case STORE_TOO_BUSY:
                    e = new RegionTooBusyException(codes[i].getExceptionMsg());
                    builder.addResultOrException(getResultOrException(e, index));
                    break;
            }
        }
    } finally {
        int processedMutationIndex = 0;
        for (Action mutation : mutations) {
            // A non-null mArray[i] means the mutation's cells have already been read from
            // the cell scanner; only skip cells for mutations that were never materialized.
            if (mArray[processedMutationIndex++] == null) {
                skipCellsForMutation(mutation, cells);
            }
        }
        updateMutationMetrics(region, before, batchContainsPuts, batchContainsDelete);
    }
}
Also used : RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) MutationProto(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) Cell(org.apache.hadoop.hbase.Cell) ByteBufferExtendedCell(org.apache.hadoop.hbase.ByteBufferExtendedCell) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) RegionTooBusyException(org.apache.hadoop.hbase.RegionTooBusyException) IOException(java.io.IOException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.LeaseManager.LeaseStillHeldException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) UnknownProtocolException(org.apache.hadoop.hbase.exceptions.UnknownProtocolException) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) UncheckedIOException(java.io.UncheckedIOException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) FileNotFoundException(java.io.FileNotFoundException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) Mutation(org.apache.hadoop.hbase.client.Mutation) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
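
The HBASE-17924 bookkeeping above boils down to a simple idea: the batch may be re-sorted for execution, but every result must still be reported under the index of the action the client originally sent. Here is a toy sketch of that mapping, with plain Strings standing in for mutations and actions; all names and values are illustrative.

// Hedged, simplified sketch of index-preserving batch execution.
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class IndexPreservingBatch {
    public static void main(String[] args) {
        List<String> actions = List.of("row-c", "row-a", "row-b"); // client order
        Map<String, Integer> originalIndex = new HashMap<>();
        for (int i = 0; i < actions.size(); i++) {
            originalIndex.put(actions.get(i), i);
        }
        // Sorting (e.g., by row key) can improve lock acquisition order server-side...
        List<String> toExecute = new ArrayList<>(actions);
        toExecute.sort(Comparator.naturalOrder());
        // ...but each result must still be reported at the client's original index.
        String[] results = new String[actions.size()];
        for (String m : toExecute) {
            results[originalIndex.get(m)] = "OK(" + m + ")";
        }
        System.out.println(String.join(", ", results)); // OK(row-c), OK(row-a), OK(row-b)
    }
}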

Aggregations

FailedSanityCheckException (org.apache.hadoop.hbase.exceptions.FailedSanityCheckException): 4
Mutation (org.apache.hadoop.hbase.client.Mutation): 3
FileNotFoundException (java.io.FileNotFoundException): 2
IOException (java.io.IOException): 2
BindException (java.net.BindException): 2
ArrayList (java.util.ArrayList): 2
ByteBufferExtendedCell (org.apache.hadoop.hbase.ByteBufferExtendedCell): 2
Cell (org.apache.hadoop.hbase.Cell): 2
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 2
DroppedSnapshotException (org.apache.hadoop.hbase.DroppedSnapshotException): 2
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 2
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException): 2
UnknownScannerException (org.apache.hadoop.hbase.UnknownScannerException): 2
OutOfOrderScannerNextException (org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException): 2
ScannerResetException (org.apache.hadoop.hbase.exceptions.ScannerResetException): 2
ServerNotRunningYetException (org.apache.hadoop.hbase.ipc.ServerNotRunningYetException): 2
ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos): 2
Action (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action): 2
MutationProto (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto): 2
ResultOrException (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException): 2