
Example 66 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

The class ClientScanner, method loadCache:

/**
   * Contact the servers to load more {@link Result}s in the cache.
   */
protected void loadCache() throws IOException {
    // check if scanner was closed during previous prefetch
    if (closed) {
        return;
    }
    long remainingResultSize = maxScannerResultSize;
    int countdown = this.caching;
    // This is possible if we just stopped at the boundary of a region in the previous call.
    if (callable == null) {
        if (!moveToNextRegion()) {
            return;
        }
    }
    // This flag is set when we want to skip the result returned. We do
    // this when we reset scanner because it split under us.
    MutableBoolean retryAfterOutOfOrderException = new MutableBoolean(true);
    // Even if we are retrying due to UnknownScannerException, ScannerResetException, etc. we should
    // make sure that we are not retrying indefinitely.
    int retriesLeft = getRetries();
    for (; ; ) {
        Result[] values;
        try {
            // The server returns null values if scanning is to stop; else it returns an
            // empty array if scanning is to go on and we've just exhausted the current
            // region. We now also fetch data when opening the scanner, so do not make
            // another next call if values is already non-null.
            values = call(callable, caller, scannerTimeout, true);
            // When a replica switch happens, we need to do certain operations again.
            // The callable will openScanner with the right startkey, but we need to pick
            // up from there. Bypass the rest of the loop and let the catch-up happen at
            // the beginning of the loop, as it does for the cases where we see exceptions.
            if (callable.switchedToADifferentReplica()) {
                // Any accumulated partial results are no longer valid since the callable will
                // openScanner with the correct startkey and we must pick up from there
                scanResultCache.clear();
                this.currentRegion = callable.getHRegionInfo();
            }
            retryAfterOutOfOrderException.setValue(true);
        } catch (DoNotRetryIOException e) {
            handleScanError(e, retryAfterOutOfOrderException, retriesLeft--);
            // reopen the scanner
            if (!moveToNextRegion()) {
                break;
            }
            continue;
        }
        long currentTime = System.currentTimeMillis();
        if (this.scanMetrics != null) {
            this.scanMetrics.sumOfMillisSecBetweenNexts.addAndGet(currentTime - lastNext);
        }
        lastNext = currentTime;
        // Groom the array of Results that we received back from the server before adding
        // those Results to the scanner's cache. If partial results are not allowed to be
        // seen by the caller, all bookkeeping will be performed within this method.
        Result[] resultsToAddToCache = scanResultCache.addAndGet(values, callable.isHeartbeatMessage());
        if (resultsToAddToCache.length > 0) {
            for (Result rs : resultsToAddToCache) {
                cache.add(rs);
                long estimatedHeapSizeOfResult = calcEstimatedSize(rs);
                countdown--;
                remainingResultSize -= estimatedHeapSizeOfResult;
                addEstimatedSize(estimatedHeapSizeOfResult);
                this.lastResult = rs;
            }
            if (scan.getLimit() > 0) {
                int newLimit = scan.getLimit() - numberOfIndividualRows(Arrays.asList(resultsToAddToCache));
                assert newLimit >= 0;
                scan.setLimit(newLimit);
            }
        }
        if (scanExhausted(values)) {
            closeScanner();
            closed = true;
            break;
        }
        boolean regionExhausted = regionExhausted(values);
        if (callable.isHeartbeatMessage()) {
            if (!cache.isEmpty()) {
                // The caller of this method just wants a Result. If we see a heartbeat
                // message, it means processing of the scan is taking a long time server
                // side. Rather than continue to loop until a limit (e.g. size or caching)
                // is reached, break out early to avoid causing unnecessary delays to the
                // caller.
                if (LOG.isTraceEnabled()) {
                    LOG.trace("Heartbeat message received and cache contains Results." + " Breaking out of scan loop");
                }
                // We know that the region has not been exhausted yet, so just break
                // without calling closeScannerIfExhausted.
                break;
            }
        }
        if (countdown <= 0) {
            // we have enough results.
            closeScannerIfExhausted(regionExhausted);
            break;
        }
        if (remainingResultSize <= 0) {
            if (!cache.isEmpty()) {
                closeScannerIfExhausted(regionExhausted);
                break;
            } else {
                // We have reached the max result size but we still cannot find anything
                // to return to the user. Reset the maxResultSize and try again.
                remainingResultSize = maxScannerResultSize;
            }
        }
        // we are done with the current region
        if (regionExhausted) {
            if (!moveToNextRegion()) {
                break;
            }
        }
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) MutableBoolean(org.apache.commons.lang.mutable.MutableBoolean)
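
In loadCache above, handleScanError either rethrows or lets the scanner reposition on a new region, so a plain caller sees a DoNotRetryIOException only when the failure is truly non-retriable. Below is a minimal caller-side sketch of that contract, assuming a hypothetical "demo" table and an already-open Connection (not code from the HBase project):

import java.io.IOException;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;

public class ScanClientSketch {

    // Iterate over an entire (hypothetical) "demo" table. If a DoNotRetryIOException
    // escapes the scanner's internal handling, retrying the same call is pointless
    // by definition, so surface it to the caller instead of looping.
    public static void scanAll(Connection connection) throws IOException {
        try (Table table = connection.getTable(TableName.valueOf("demo"));
             ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result result : scanner) {
                System.out.println(result);
            }
        } catch (DoNotRetryIOException e) {
            // Non-retriable by contract (e.g. a malformed request):
            // report and fail fast rather than re-issuing the scan.
            System.err.println("Scan failed permanently: " + e.getMessage());
            throw e;
        }
    }
}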

Example 67 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

The class MultiRowMutationProcessor, method process:

@Override
public void process(long now, HRegion region, List<Mutation> mutationsToApply, WALEdit walEdit) throws IOException {
    byte[] byteNow = Bytes.toBytes(now);
    // Check mutations
    for (Mutation m : this.mutations) {
        if (m instanceof Put) {
            Map<byte[], List<Cell>> familyMap = m.getFamilyCellMap();
            region.checkFamilies(familyMap.keySet());
            region.checkTimestamps(familyMap, now);
            region.updateCellTimestamps(familyMap.values(), byteNow);
        } else if (m instanceof Delete) {
            Delete d = (Delete) m;
            region.prepareDelete(d);
            region.prepareDeleteTimestamps(d, d.getFamilyCellMap(), byteNow);
        } else {
            throw new DoNotRetryIOException("Action must be Put or Delete. But was: " + m.getClass().getName());
        }
        mutationsToApply.add(m);
    }
    // Apply edits to a single WALEdit
    for (Mutation m : mutations) {
        for (List<Cell> cells : m.getFamilyCellMap().values()) {
            boolean writeToWAL = m.getDurability() != Durability.SKIP_WAL;
            for (Cell cell : cells) {
                if (writeToWAL) {
                    walEdit.add(cell);
                }
            }
        }
    }
}
Also used : Delete(org.apache.hadoop.hbase.client.Delete) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) List(java.util.List) Mutation(org.apache.hadoop.hbase.client.Mutation) Cell(org.apache.hadoop.hbase.Cell) Put(org.apache.hadoop.hbase.client.Put)
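
The instanceof checks above are the whole validation story: anything other than a Put or Delete is rejected with DoNotRetryIOException, because resubmitting the same unsupported mutation could never succeed. A standalone sketch of that pattern (hypothetical helper class, not part of HBase):

import java.util.List;

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;

final class MutationTypeCheck {

    // Fail fast on unsupported mutation types. Throwing DoNotRetryIOException
    // (rather than a plain IOException) tells the client-side retry machinery
    // not to resubmit a request that is invalid by construction.
    static void requirePutOrDelete(List<Mutation> mutations) throws DoNotRetryIOException {
        for (Mutation m : mutations) {
            if (!(m instanceof Put) && !(m instanceof Delete)) {
                throw new DoNotRetryIOException(
                    "Action must be Put or Delete. But was: " + m.getClass().getName());
            }
        }
    }
}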

Example 68 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

The class RSRpcServices, method openRegion:

/**
   * Open asynchronously a region or a set of regions on the region server.
   *
   * The opening is coordinated by ZooKeeper, and this method requires the znode to be created
   *  before being called. As a consequence, this method should be called only from the master.
   * <p>
   * The different managed states for the region are:
   * </p><ul>
   *  <li>region not opened: the region opening will start asynchronously.</li>
   *  <li>a close is already in progress: this is considered as an error.</li>
   *  <li>an open is already in progress: this new open request will be ignored. This is important
   *  because the Master can do multiple requests if it crashes.</li>
   *  <li>the region is already opened:  this new open request will be ignored.</li>
   *  </ul>
   * <p>
   * Bulk assign: If there is more than one region to open, it is considered a bulk assign.
   * For a single region opening, errors are sent through a ServiceException. For bulk assign,
   * errors are put in the response as FAILED_OPENING.
   * </p>
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public OpenRegionResponse openRegion(final RpcController controller, final OpenRegionRequest request) throws ServiceException {
    requestCount.increment();
    if (request.hasServerStartCode()) {
        // check that we are the same server that this RPC is intended for.
        long serverStartCode = request.getServerStartCode();
        if (regionServer.serverName.getStartcode() != serverStartCode) {
            throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " + "different server with startCode: " + serverStartCode + ", this server is: " + regionServer.serverName));
        }
    }
    OpenRegionResponse.Builder builder = OpenRegionResponse.newBuilder();
    final int regionCount = request.getOpenInfoCount();
    final Map<TableName, HTableDescriptor> htds = new HashMap<>(regionCount);
    final boolean isBulkAssign = regionCount > 1;
    try {
        checkOpen();
    } catch (IOException ie) {
        TableName tableName = null;
        if (regionCount == 1) {
            RegionInfo ri = request.getOpenInfo(0).getRegion();
            if (ri != null) {
                tableName = ProtobufUtil.toTableName(ri.getTableName());
            }
        }
        if (!TableName.META_TABLE_NAME.equals(tableName)) {
            throw new ServiceException(ie);
        }
        // We are assigning meta, wait a little for regionserver to finish initialization.
        // Quarter of the RPC timeout
        int timeout = regionServer.conf.getInt(HConstants.HBASE_RPC_TIMEOUT_KEY,
            HConstants.DEFAULT_HBASE_RPC_TIMEOUT) >> 2;
        long endTime = System.currentTimeMillis() + timeout;
        synchronized (regionServer.online) {
            try {
                while (System.currentTimeMillis() <= endTime && !regionServer.isStopped() && !regionServer.isOnline()) {
                    regionServer.online.wait(regionServer.msgInterval);
                }
                checkOpen();
            } catch (InterruptedException t) {
                Thread.currentThread().interrupt();
                throw new ServiceException(t);
            } catch (IOException e) {
                throw new ServiceException(e);
            }
        }
    }
    long masterSystemTime = request.hasMasterSystemTime() ? request.getMasterSystemTime() : -1;
    for (RegionOpenInfo regionOpenInfo : request.getOpenInfoList()) {
        final HRegionInfo region = HRegionInfo.convert(regionOpenInfo.getRegion());
        HTableDescriptor htd;
        try {
            String encodedName = region.getEncodedName();
            byte[] encodedNameBytes = region.getEncodedNameAsBytes();
            final Region onlineRegion = regionServer.getFromOnlineRegions(encodedName);
            if (onlineRegion != null) {
                // The region is already online. This should not happen any more.
                String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which is already online";
                regionServer.abort(error);
                throw new IOException(error);
            }
            LOG.info("Open " + region.getRegionNameAsString());
            final Boolean previous = regionServer.regionsInTransitionInRS.putIfAbsent(encodedNameBytes, Boolean.TRUE);
            if (Boolean.FALSE.equals(previous)) {
                if (regionServer.getFromOnlineRegions(encodedName) != null) {
                    // There is a close in progress. This should not happen any more.
                    String error = "Received OPEN for the region:" + region.getRegionNameAsString() + ", which we are already trying to CLOSE";
                    regionServer.abort(error);
                    throw new IOException(error);
                }
                regionServer.regionsInTransitionInRS.put(encodedNameBytes, Boolean.TRUE);
            }
            if (Boolean.TRUE.equals(previous)) {
                // An open is in progress. This is supported, but let's log this.
                LOG.info("Receiving OPEN for the region:" + region.getRegionNameAsString() + ", which we are already trying to OPEN" + " - ignoring this new request for this region.");
            }
            // We are opening this region. If it moves back and forth for whatever reason, we don't
            // want to keep returning the stale moved record while we are opening/if we close again.
            regionServer.removeFromMovedRegions(region.getEncodedName());
            if (previous == null || !previous.booleanValue()) {
                // check if the region to be opened is marked in recovering state in ZK
                if (ZKSplitLog.isRegionMarkedRecoveringInZK(regionServer.getZooKeeper(), region.getEncodedName())) {
                    // Check whether this region open is for distributedLogReplay. This
                    // check supports rolling restart/upgrade, where we want the Master
                    // and RS to see the same configuration.
                    if (!regionOpenInfo.hasOpenForDistributedLogReplay() || regionOpenInfo.getOpenForDistributedLogReplay()) {
                        regionServer.recoveringRegions.put(region.getEncodedName(), null);
                    } else {
                        // Remove the stale recovering region from ZK when we open a region
                        // not for recovery, which can happen when distributedLogReplay is
                        // turned from on to off.
                        List<String> tmpRegions = new ArrayList<>();
                        tmpRegions.add(region.getEncodedName());
                        ZKSplitLog.deleteRecoveringRegionZNodes(regionServer.getZooKeeper(), tmpRegions);
                    }
                }
                htd = htds.get(region.getTable());
                if (htd == null) {
                    htd = regionServer.tableDescriptors.get(region.getTable());
                    htds.put(region.getTable(), htd);
                }
                if (htd == null) {
                    throw new IOException("Missing table descriptor for " + region.getEncodedName());
                }
                // Need to pass the expected version in the constructor.
                if (region.isMetaRegion()) {
                    regionServer.service.submit(new OpenMetaHandler(regionServer, regionServer, region, htd, masterSystemTime));
                } else {
                    if (regionOpenInfo.getFavoredNodesCount() > 0) {
                        regionServer.updateRegionFavoredNodesMapping(region.getEncodedName(), regionOpenInfo.getFavoredNodesList());
                    }
                    if (htd.getPriority() >= HConstants.ADMIN_QOS || region.getTable().isSystemTable()) {
                        regionServer.service.submit(new OpenPriorityRegionHandler(regionServer, regionServer, region, htd, masterSystemTime));
                    } else {
                        regionServer.service.submit(new OpenRegionHandler(regionServer, regionServer, region, htd, masterSystemTime));
                    }
                }
            }
            builder.addOpeningState(RegionOpeningState.OPENED);
        } catch (KeeperException zooKeeperEx) {
            LOG.error("Can't retrieve recovering state from zookeeper", zooKeeperEx);
            throw new ServiceException(zooKeeperEx);
        } catch (IOException ie) {
            LOG.warn("Failed opening region " + region.getRegionNameAsString(), ie);
            if (isBulkAssign) {
                builder.addOpeningState(RegionOpeningState.FAILED_OPENING);
            } else {
                throw new ServiceException(ie);
            }
        }
    }
    return builder.build();
}
Also used :
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)
HashMap (java.util.HashMap)
ArrayList (java.util.ArrayList)
RegionInfo (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionInfo)
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString)
RegionOpenInfo (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo)
OpenRegionHandler (org.apache.hadoop.hbase.regionserver.handler.OpenRegionHandler)
OpenPriorityRegionHandler (org.apache.hadoop.hbase.regionserver.handler.OpenPriorityRegionHandler)
OpenMetaHandler (org.apache.hadoop.hbase.regionserver.handler.OpenMetaHandler)
OpenRegionResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse)
InterruptedIOException (java.io.InterruptedIOException)
IOException (java.io.IOException)
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor)
TableName (org.apache.hadoop.hbase.TableName)
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException)
KeeperException (org.apache.zookeeper.KeeperException)
QosPriority (org.apache.hadoop.hbase.ipc.QosPriority)
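
The serverStartCode guard at the top of openRegion is worth isolating: a request addressed to an earlier incarnation of this server can never succeed on the current process, so it fails with a DoNotRetryIOException wrapped in a ServiceException. A condensed sketch of just that guard (hypothetical class and method names):

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException;

final class StartCodeGuard {

    // Reject RPCs meant for a previous incarnation of this server. Wrapping the
    // DoNotRetryIOException in a ServiceException matches the RPC layer's
    // contract while still telling the client not to retry.
    static void check(long requestStartCode, ServerName serverName) throws ServiceException {
        if (serverName.getStartcode() != requestStartCode) {
            throw new ServiceException(new DoNotRetryIOException(
                "This RPC was intended for a different server with startCode: "
                    + requestStartCode + ", this server is: " + serverName));
        }
    }
}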

Example 69 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

The class RSRpcServices, method doNonAtomicRegionMutation:

/**
   * Run through the regionMutation <code>rm</code> and per Mutation, do the work, and then when
   * done, add an instance of a {@link ResultOrException} that corresponds to each Mutation.
   * @param region
   * @param actions
   * @param cellScanner
   * @param builder
   * @param cellsToReturn  Could be null. May be allocated in this method.  This is what this
   * method returns as a 'result'.
   * @param closeCallBack the callback to be used with multigets
   * @param context the current RpcCallContext
   * @return Return the <code>cellScanner</code> passed
   */
private List<CellScannable> doNonAtomicRegionMutation(final Region region, final OperationQuota quota, final RegionAction actions, final CellScanner cellScanner, final RegionActionResult.Builder builder, List<CellScannable> cellsToReturn, long nonceGroup, final RegionScannersCloseCallBack closeCallBack, RpcCallContext context) {
    // Gather up CONTIGUOUS Puts and Deletes in this mutations List. The idea is that rather
    // than doing them one at a time, we instead pass them in batch. Be aware that the
    // corresponding ResultOrException instance that matches each Put or Delete is then added
    // down in the doBatchOp call. We should stay aligned even though the Put and Delete are
    // deferred/batched.
    List<ClientProtos.Action> mutations = null;
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    IOException sizeIOE = null;
    Object lastBlock = null;
    ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = ResultOrException.newBuilder();
    boolean hasResultOrException = false;
    for (ClientProtos.Action action : actions.getActionList()) {
        hasResultOrException = false;
        resultOrExceptionBuilder.clear();
        try {
            Result r = null;
            if (context != null && context.isRetryImmediatelySupported() && (context.getResponseCellSize() > maxQuotaResultSize || context.getResponseBlockSize() + context.getResponseExceptionSize() > maxQuotaResultSize)) {
                // We're storing the exception since the exception and reason string won't
                // change after the response size limit is reached.
                if (sizeIOE == null) {
                    // We don't need the stack unwinding, so don't throw the exception.
                    // Throwing will kill the JVM's JIT.
                    //
                    // Instead just create the exception and then store it.
                    sizeIOE = new MultiActionResultTooLarge("Max size exceeded" + " CellSize: " + context.getResponseCellSize() + " BlockSize: " + context.getResponseBlockSize());
                    // Only report the exception once since there's only one request that
                    // caused the exception. Otherwise this number will dominate the exceptions count.
                    rpcServer.getMetrics().exception(sizeIOE);
                }
                // Now that the exception is known to have been created,
                // use it for the response.
                //
                // This will create a copy in the builder.
                hasResultOrException = true;
                NameBytesPair pair = ResponseConverter.buildException(sizeIOE);
                resultOrExceptionBuilder.setException(pair);
                context.incrementResponseExceptionSize(pair.getSerializedSize());
                resultOrExceptionBuilder.setIndex(action.getIndex());
                builder.addResultOrException(resultOrExceptionBuilder.build());
                if (cellScanner != null) {
                    skipCellsForMutation(action, cellScanner);
                }
                continue;
            }
            if (action.hasGet()) {
                long before = EnvironmentEdgeManager.currentTime();
                try {
                    Get get = ProtobufUtil.toGet(action.getGet());
                    if (context != null) {
                        r = get(get, ((HRegion) region), closeCallBack, context);
                    } else {
                        r = region.get(get);
                    }
                } finally {
                    if (regionServer.metricsRegionServer != null) {
                        regionServer.metricsRegionServer.updateGet(EnvironmentEdgeManager.currentTime() - before);
                    }
                }
            } else if (action.hasServiceCall()) {
                hasResultOrException = true;
                try {
                    com.google.protobuf.Message result = execServiceOnRegion(region, action.getServiceCall());
                    ClientProtos.CoprocessorServiceResult.Builder serviceResultBuilder = ClientProtos.CoprocessorServiceResult.newBuilder();
                    resultOrExceptionBuilder.setServiceResult(serviceResultBuilder.setValue(serviceResultBuilder.getValueBuilder().setName(result.getClass().getName()).setValue(UnsafeByteOperations.unsafeWrap(result.toByteArray()))));
                } catch (IOException ioe) {
                    rpcServer.getMetrics().exception(ioe);
                    NameBytesPair pair = ResponseConverter.buildException(ioe);
                    resultOrExceptionBuilder.setException(pair);
                    context.incrementResponseExceptionSize(pair.getSerializedSize());
                }
            } else if (action.hasMutation()) {
                MutationType type = action.getMutation().getMutateType();
                if (type != MutationType.PUT && type != MutationType.DELETE && mutations != null && !mutations.isEmpty()) {
                    // Flush out any Puts or Deletes already collected.
                    doBatchOp(builder, region, quota, mutations, cellScanner);
                    mutations.clear();
                }
                switch(type) {
                    case APPEND:
                        r = append(region, quota, action.getMutation(), cellScanner, nonceGroup);
                        break;
                    case INCREMENT:
                        r = increment(region, quota, action.getMutation(), cellScanner, nonceGroup);
                        break;
                    case PUT:
                    case DELETE:
                        // Collect the individual mutations and apply in a batch
                        if (mutations == null) {
                            mutations = new ArrayList<>(actions.getActionCount());
                        }
                        mutations.add(action);
                        break;
                    default:
                        throw new DoNotRetryIOException("Unsupported mutate type: " + type.name());
                }
            } else {
                throw new HBaseIOException("Unexpected Action type");
            }
            if (r != null) {
                ClientProtos.Result pbResult = null;
                if (isClientCellBlockSupport(context)) {
                    pbResult = ProtobufUtil.toResultNoData(r);
                    //  Hard to guess the size here.  Just make a rough guess.
                    if (cellsToReturn == null) {
                        cellsToReturn = new ArrayList<>();
                    }
                    cellsToReturn.add(r);
                } else {
                    pbResult = ProtobufUtil.toResult(r);
                }
                lastBlock = addSize(context, r, lastBlock);
                hasResultOrException = true;
                resultOrExceptionBuilder.setResult(pbResult);
            }
        // Could get to here and there was no result and no exception.  Presumes we added
        // a Put or Delete to the collecting Mutations List for adding later.  In this
        // case the corresponding ResultOrException instance for the Put or Delete will be added
        // down in the doBatchOp method call rather than up here.
        } catch (IOException ie) {
            rpcServer.getMetrics().exception(ie);
            hasResultOrException = true;
            NameBytesPair pair = ResponseConverter.buildException(ie);
            resultOrExceptionBuilder.setException(pair);
            context.incrementResponseExceptionSize(pair.getSerializedSize());
        }
        if (hasResultOrException) {
            // Propagate index.
            resultOrExceptionBuilder.setIndex(action.getIndex());
            builder.addResultOrException(resultOrExceptionBuilder.build());
        }
    }
    // Finish up any outstanding mutations
    if (mutations != null && !mutations.isEmpty()) {
        doBatchOp(builder, region, quota, mutations, cellScanner);
    }
    return cellsToReturn;
}
Also used :
MultiActionResultTooLarge (org.apache.hadoop.hbase.MultiActionResultTooLarge)
RegionAction (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction)
Action (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action)
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType)
Message (org.apache.hadoop.hbase.shaded.com.google.protobuf.Message)
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)
InterruptedIOException (java.io.InterruptedIOException)
IOException (java.io.IOException)
RegionActionResult (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult)
Result (org.apache.hadoop.hbase.client.Result)
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair)
Get (org.apache.hadoop.hbase.client.Get)
MutableObject (org.apache.commons.lang.mutable.MutableObject)
ResultOrException (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException)
ClientProtos (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
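
One detail above deserves a closer look: sizeIOE is constructed once, reported to metrics once, and then reused for every remaining action in the batch, since neither the exception nor its reason string changes after the size limit is hit. A minimal standalone sketch of that create-once pattern (hypothetical class, with a plain IOException standing in for MultiActionResultTooLarge):

import java.io.IOException;

final class SizeLimitError {

    private IOException sizeIOE;

    // Build the over-limit exception lazily and cache it. Creating it without
    // throwing avoids repeated stack unwinding, and callers can report it to
    // metrics exactly once (on first creation) instead of once per action.
    IOException get(long cellSize, long blockSize) {
        if (sizeIOE == null) {
            sizeIOE = new IOException(
                "Max size exceeded CellSize: " + cellSize + " BlockSize: " + blockSize);
        }
        return sizeIOE;
    }
}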

Example 70 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

The class RSRpcServices, method checkAndRowMutate:

/**
   * Atomically apply a list of mutations to a single row, guarded by a check on one cell.
   *
   * @param region
   * @param actions
   * @param cellScanner if non-null, the mutation data -- the Cell content.
   * @param row
   * @param family
   * @param qualifier
   * @param compareOp
   * @param comparator
   * @throws IOException
   */
private boolean checkAndRowMutate(final Region region, final List<ClientProtos.Action> actions, final CellScanner cellScanner, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator, RegionActionResult.Builder builder) throws IOException {
    if (!region.getRegionInfo().isMetaTable()) {
        regionServer.cacheFlusher.reclaimMemStoreMemory();
    }
    RowMutations rm = null;
    int i = 0;
    ClientProtos.ResultOrException.Builder resultOrExceptionOrBuilder = ClientProtos.ResultOrException.newBuilder();
    for (ClientProtos.Action action : actions) {
        if (action.hasGet()) {
            throw new DoNotRetryIOException("Atomic put and/or delete only, not a Get=" + action.getGet());
        }
        MutationType type = action.getMutation().getMutateType();
        if (rm == null) {
            rm = new RowMutations(action.getMutation().getRow().toByteArray(), actions.size());
        }
        switch(type) {
            case PUT:
                rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
                break;
            case DELETE:
                rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
                break;
            default:
                throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
        }
        // To unify the response format with doNonAtomicRegionMutation and read through the
        // client's AsyncProcess, we have to add an empty result instance per operation.
        resultOrExceptionOrBuilder.clear();
        resultOrExceptionOrBuilder.setIndex(i++);
        builder.addResultOrException(resultOrExceptionOrBuilder.build());
    }
    return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm, Boolean.TRUE);
}
Also used : MutationType(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) Action(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Action) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos) RowMutations(org.apache.hadoop.hbase.client.RowMutations)
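
For context, this server method backs the era-appropriate client call Table#checkAndMutate(row, family, qualifier, compareOp, value, RowMutations): one cell-level condition guards an atomic batch of Puts and Deletes on a single row. A usage sketch with hypothetical table and column names:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndMutateSketch {

    // Atomically add cf:new and delete cf:old on "row1", but only if cf:flag
    // currently equals "ready". Returns true when the condition held and the
    // mutations were applied. Table and column names are illustrative only.
    public static boolean swapColumns(Connection connection) throws Exception {
        byte[] row = Bytes.toBytes("row1");
        byte[] cf = Bytes.toBytes("cf");
        try (Table table = connection.getTable(TableName.valueOf("demo"))) {
            RowMutations rm = new RowMutations(row);
            rm.add(new Put(row).addColumn(cf, Bytes.toBytes("new"), Bytes.toBytes("v")));
            rm.add(new Delete(row).addColumns(cf, Bytes.toBytes("old")));
            return table.checkAndMutate(row, cf, Bytes.toBytes("flag"),
                CompareOp.EQUAL, Bytes.toBytes("ready"), rm);
        }
    }
}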

Aggregations

DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 77
IOException (java.io.IOException): 28
Cell (org.apache.hadoop.hbase.Cell): 18
ArrayList (java.util.ArrayList): 12
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 12
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 12
TableName (org.apache.hadoop.hbase.TableName): 11
InterruptedIOException (java.io.InterruptedIOException): 10
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 10
Delete (org.apache.hadoop.hbase.client.Delete): 10
Put (org.apache.hadoop.hbase.client.Put): 10
Test (org.junit.Test): 10
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 9
User (org.apache.hadoop.hbase.security.User): 8
Mutation (org.apache.hadoop.hbase.client.Mutation): 7
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 7
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6
ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell): 5
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 5