
Example 36 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

the class RSRpcServices method replay.

/**
   * Replay the given changes from WAL edits of a failed RS during distributedLogReplay. The guarantee is
   * that the given mutations will be durable on the receiving RS if this method returns without any
   * exception.
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
@Override
@QosPriority(priority = HConstants.REPLAY_QOS)
public ReplicateWALEntryResponse replay(final RpcController controller, final ReplicateWALEntryRequest request) throws ServiceException {
    long before = EnvironmentEdgeManager.currentTime();
    CellScanner cells = ((HBaseRpcController) controller).cellScanner();
    try {
        checkOpen();
        List<WALEntry> entries = request.getEntryList();
        if (entries == null || entries.isEmpty()) {
            // empty input
            return ReplicateWALEntryResponse.newBuilder().build();
        }
        ByteString regionName = entries.get(0).getKey().getEncodedRegionName();
        Region region = regionServer.getRegionByEncodedName(regionName.toStringUtf8());
        // Do not invoke coprocessors if this is a secondary region replica
        RegionCoprocessorHost coprocessorHost = ServerRegionReplicaUtil.isDefaultReplica(region.getRegionInfo())
            ? region.getCoprocessorHost() : null;
        List<Pair<WALKey, WALEdit>> walEntries = new ArrayList<>();
        // Skip adding the edits to WAL if this is a secondary region replica
        boolean isPrimary = RegionReplicaUtil.isDefaultReplica(region.getRegionInfo());
        Durability durability = isPrimary ? Durability.USE_DEFAULT : Durability.SKIP_WAL;
        for (WALEntry entry : entries) {
            if (!regionName.equals(entry.getKey().getEncodedRegionName())) {
                throw new NotServingRegionException("Replay request contains entries from multiple " + "regions. First region:" + regionName.toStringUtf8() + " , other region:" + entry.getKey().getEncodedRegionName());
            }
            if (regionServer.nonceManager != null && isPrimary) {
                long nonceGroup = entry.getKey().hasNonceGroup() ? entry.getKey().getNonceGroup() : HConstants.NO_NONCE;
                long nonce = entry.getKey().hasNonce() ? entry.getKey().getNonce() : HConstants.NO_NONCE;
                regionServer.nonceManager.reportOperationFromWal(nonceGroup, nonce, entry.getKey().getWriteTime());
            }
            Pair<WALKey, WALEdit> walEntry = (coprocessorHost == null) ? null : new Pair<>();
            List<WALSplitter.MutationReplay> edits = WALSplitter.getMutationsFromWALEntry(entry, cells, walEntry, durability);
            if (coprocessorHost != null) {
                // Run the coprocessor pre-WAL-restore hook for this entry before applying it.
                if (coprocessorHost.preWALRestore(region.getRegionInfo(), walEntry.getFirst(), walEntry.getSecond())) {
                    // If the coprocessor bypassed this log entry, skip it
                    continue;
                }
                walEntries.add(walEntry);
            }
            if (edits != null && !edits.isEmpty()) {
                long replaySeqId = (entry.getKey().hasOrigSequenceNumber()) ? entry.getKey().getOrigSequenceNumber() : entry.getKey().getLogSequenceNumber();
                OperationStatus[] result = doReplayBatchOp(region, edits, replaySeqId);
                // check if it's a partial success
                for (int i = 0; result != null && i < result.length; i++) {
                    if (result[i] != OperationStatus.SUCCESS) {
                        throw new IOException(result[i].getExceptionMsg());
                    }
                }
            }
        }
        // Sync the WAL at the end because ASYNC_WAL is used above
        WAL wal = getWAL(region);
        if (wal != null) {
            wal.sync();
        }
        if (coprocessorHost != null) {
            for (Pair<WALKey, WALEdit> entry : walEntries) {
                coprocessorHost.postWALRestore(region.getRegionInfo(), entry.getFirst(), entry.getSecond());
            }
        }
        return ReplicateWALEntryResponse.newBuilder().build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    } finally {
        if (regionServer.metricsRegionServer != null) {
            regionServer.metricsRegionServer.updateReplay(EnvironmentEdgeManager.currentTime() - before);
        }
    }
}
Also used : WAL(org.apache.hadoop.hbase.wal.WAL) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) ArrayList(java.util.ArrayList) CellScanner(org.apache.hadoop.hbase.CellScanner) WALKey(org.apache.hadoop.hbase.wal.WALKey) WALEdit(org.apache.hadoop.hbase.regionserver.wal.WALEdit) Pair(org.apache.hadoop.hbase.util.Pair) NameInt64Pair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) Durability(org.apache.hadoop.hbase.client.Durability) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) WALEntry(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.WALEntry) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)
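
The replay handler above follows a recurring shape: do the work inside a try, convert any checked IOException into a ServiceException for the RPC layer, and record elapsed time in a finally block so metrics are updated even on failure. The following is a minimal sketch of that shape, assuming only the protobuf ServiceException API (ServiceException(Throwable), getCause()); ReplayStylePattern, handle and doWork are hypothetical placeholders, and com.google.protobuf.ServiceException stands in for the shaded class used above.

import java.io.IOException;
import com.google.protobuf.ServiceException; // unshaded equivalent of the shaded class above

public class ReplayStylePattern {
    // Stand-in for a server-side handler: do the work, convert checked
    // IOExceptions to ServiceException, and always record elapsed time.
    public static String handle(String request) throws ServiceException {
        long before = System.currentTimeMillis();
        try {
            if (request == null || request.isEmpty()) {
                return "";               // empty input, nothing to replay
            }
            return doWork(request);      // may throw IOException
        } catch (IOException ie) {
            throw new ServiceException(ie);  // wrap for the RPC layer
        } finally {
            long elapsed = System.currentTimeMillis() - before;
            System.out.println("replay took " + elapsed + " ms");
        }
    }

    private static String doWork(String request) throws IOException {
        if (request.startsWith("bad")) {
            throw new IOException("simulated failure for: " + request);
        }
        return "replayed " + request;
    }

    public static void main(String[] args) {
        try {
            System.out.println(handle("entry-1"));
            System.out.println(handle("bad-entry"));
        } catch (ServiceException se) {
            // Callers typically unwrap the original IOException from the cause.
            System.out.println("remote failure: " + se.getCause().getMessage());
        }
    }
}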

Example 37 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

the class RSRpcServices method multi.

/**
   * Execute multiple actions on a table: get, mutate, and/or execCoprocessor
   *
   * @param rpcc the RPC controller
   * @param request the multi request
   * @throws ServiceException
   */
@Override
public MultiResponse multi(final RpcController rpcc, final MultiRequest request) throws ServiceException {
    try {
        checkOpen();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
    // rpc controller is how we bring in data via the back door;  it is unprotobuf'ed data.
    // It is also the conduit via which we pass back data.
    HBaseRpcController controller = (HBaseRpcController) rpcc;
    CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
    if (controller != null) {
        controller.setCellScanner(null);
    }
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    // this will contain all the cells that we need to return. It's created later, if needed.
    List<CellScannable> cellsToReturn = null;
    MultiResponse.Builder responseBuilder = MultiResponse.newBuilder();
    RegionActionResult.Builder regionActionResultBuilder = RegionActionResult.newBuilder();
    Boolean processed = null;
    RegionScannersCloseCallBack closeCallBack = null;
    RpcCallContext context = RpcServer.getCurrentCall();
    this.rpcMultiRequestCount.increment();
    Map<RegionSpecifier, ClientProtos.RegionLoadStats> regionStats = new HashMap<>(request.getRegionActionCount());
    for (RegionAction regionAction : request.getRegionActionList()) {
        this.requestCount.add(regionAction.getActionCount());
        OperationQuota quota;
        Region region;
        regionActionResultBuilder.clear();
        RegionSpecifier regionSpecifier = regionAction.getRegion();
        try {
            region = getRegion(regionSpecifier);
            quota = getQuotaManager().checkQuota(region, regionAction.getActionList());
        } catch (IOException e) {
            rpcServer.getMetrics().exception(e);
            regionActionResultBuilder.setException(ResponseConverter.buildException(e));
            responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
            // The Region isn't available on this RS, so the client will retry these Mutations;
            // skip the Cells in the CellScanner corresponding to them.
            if (cellScanner != null) {
                skipCellsForMutations(regionAction.getActionList(), cellScanner);
            }
            // For this region it's a failure.
            continue;
        }
        if (regionAction.hasAtomic() && regionAction.getAtomic()) {
            // Need to return an item per Action along w/ Action index.  TODO.
            try {
                if (request.hasCondition()) {
                    Condition condition = request.getCondition();
                    byte[] row = condition.getRow().toByteArray();
                    byte[] family = condition.getFamily().toByteArray();
                    byte[] qualifier = condition.getQualifier().toByteArray();
                    CompareOp compareOp = CompareOp.valueOf(condition.getCompareType().name());
                    ByteArrayComparable comparator = ProtobufUtil.toComparator(condition.getComparator());
                    processed = checkAndRowMutate(region, regionAction.getActionList(), cellScanner, row, family, qualifier, compareOp, comparator, regionActionResultBuilder);
                } else {
                    mutateRows(region, regionAction.getActionList(), cellScanner, regionActionResultBuilder);
                    processed = Boolean.TRUE;
                }
            } catch (IOException e) {
                rpcServer.getMetrics().exception(e);
                // As it's atomic, we may expect it's a global failure.
                regionActionResultBuilder.setException(ResponseConverter.buildException(e));
            }
        } else {
            // doNonAtomicRegionMutation manages the exception internally
            if (context != null && closeCallBack == null) {
                // An RpcCallBack that creates a list of scanners that needs to perform callBack
                // operation on completion of multiGets.
                // Set this only once
                closeCallBack = new RegionScannersCloseCallBack();
                context.setCallBack(closeCallBack);
            }
            cellsToReturn = doNonAtomicRegionMutation(region, quota, regionAction, cellScanner, regionActionResultBuilder, cellsToReturn, nonceGroup, closeCallBack, context);
        }
        responseBuilder.addRegionActionResult(regionActionResultBuilder.build());
        quota.close();
        ClientProtos.RegionLoadStats regionLoadStats = ((HRegion) region).getLoadStatistics();
        if (regionLoadStats != null) {
            regionStats.put(regionSpecifier, regionLoadStats);
        }
    }
    // Load the controller with the Cells to return.
    if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) {
        controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn));
    }
    if (processed != null) {
        responseBuilder.setProcessed(processed);
    }
    MultiRegionLoadStats.Builder builder = MultiRegionLoadStats.newBuilder();
    for (Entry<RegionSpecifier, ClientProtos.RegionLoadStats> stat : regionStats.entrySet()) {
        builder.addRegion(stat.getKey());
        builder.addStat(stat.getValue());
    }
    responseBuilder.setRegionStatistics(builder);
    return responseBuilder.build();
}
Also used : CellScannable(org.apache.hadoop.hbase.CellScannable) MultiResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiResponse) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) RegionAction(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionAction) CellScanner(org.apache.hadoop.hbase.CellScanner) RegionSpecifier(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier) ByteArrayComparable(org.apache.hadoop.hbase.filter.ByteArrayComparable) MultiRegionLoadStats(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats) RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) Condition(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.Condition) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) MultiRegionLoadStats(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MultiRegionLoadStats) CompareOp(org.apache.hadoop.hbase.filter.CompareFilter.CompareOp) ClientProtos(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos)
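
Because multi (and the other handlers on this page) wrap every IOException in a ServiceException before it crosses the RPC boundary, callers generally unwrap the cause to get back the original IOException. A minimal caller-side sketch of that, assuming only ServiceException's getCause(); UnwrapPattern and unwrapRemote are illustrative names, not HBase API.

import java.io.IOException;
import com.google.protobuf.ServiceException;

public class UnwrapPattern {
    // Recover the underlying IOException the server wrapped, or re-wrap anything else.
    static IOException unwrapRemote(ServiceException se) {
        Throwable cause = se.getCause();
        if (cause instanceof IOException) {
            return (IOException) cause;
        }
        return new IOException(se);
    }

    public static void main(String[] args) {
        ServiceException se = new ServiceException(new IOException("region not online"));
        IOException ioe = unwrapRemote(se);
        System.out.println(ioe.getMessage()); // prints: region not online
    }
}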

Example 38 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

the class RSRpcServices method getServerInfo.

/**
   * Get some information of the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public GetServerInfoResponse getServerInfo(final RpcController controller, final GetServerInfoRequest request) throws ServiceException {
    try {
        checkOpen();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
    requestCount.increment();
    int infoPort = regionServer.infoServer != null ? regionServer.infoServer.getPort() : -1;
    return ResponseConverter.buildGetServerInfoResponse(regionServer.serverName, infoPort);
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)

Example 39 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

the class RSRpcServices method flushRegion.

/**
   * Flush a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @throws ServiceException
   */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public FlushRegionResponse flushRegion(final RpcController controller, final FlushRegionRequest request) throws ServiceException {
    try {
        checkOpen();
        requestCount.increment();
        Region region = getRegion(request.getRegion());
        LOG.info("Flushing " + region.getRegionInfo().getRegionNameAsString());
        boolean shouldFlush = true;
        if (request.hasIfOlderThanTs()) {
            shouldFlush = region.getEarliestFlushTimeForAllStores() < request.getIfOlderThanTs();
        }
        FlushRegionResponse.Builder builder = FlushRegionResponse.newBuilder();
        if (shouldFlush) {
            boolean writeFlushWalMarker = request.hasWriteFlushWalMarker() ? request.getWriteFlushWalMarker() : false;
            // Go behind the curtain so we can manage writing of the flush WAL marker
            HRegion.FlushResultImpl flushResult = (HRegion.FlushResultImpl) ((HRegion) region).flushcache(true, writeFlushWalMarker);
            boolean compactionNeeded = flushResult.isCompactionNeeded();
            if (compactionNeeded) {
                regionServer.compactSplitThread.requestSystemCompaction(region, "Compaction through user triggered flush");
            }
            builder.setFlushed(flushResult.isFlushSucceeded());
            builder.setWroteFlushWalMarker(flushResult.wroteFlushWalMarker);
        }
        builder.setLastFlushTime(region.getEarliestFlushTimeForAllStores());
        return builder.build();
    } catch (DroppedSnapshotException ex) {
        // Cache flush can fail in a few places. If it fails in a critical
        // section, we get a DroppedSnapshotException and a replay of wal
        // is required. Currently the only way to do this is a restart of
        // the server.
        regionServer.abort("Replay of WAL required. Forcing server shutdown", ex);
        throw new ServiceException(ex);
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
}
Also used : DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) FlushRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.FlushRegionResponse) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)
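
flushRegion distinguishes an unrecoverable failure (DroppedSnapshotException, which forces a server abort) from an ordinary IOException (which is simply wrapped). A minimal sketch of that two-tier handling, assuming only the protobuf ServiceException; FatalStateException, abort and flush are hypothetical stand-ins, not HBase classes.

import java.io.IOException;
import com.google.protobuf.ServiceException;

public class FlushStylePattern {
    // Hypothetical marker for failures that leave the process in an unusable state.
    static class FatalStateException extends IOException {
        FatalStateException(String msg) { super(msg); }
    }

    static void abort(String reason, Throwable cause) {
        // In the real server this shuts the process down; here we just log it.
        System.out.println("ABORT: " + reason + " (" + cause.getMessage() + ")");
    }

    public static boolean flush(boolean simulateFatal) throws ServiceException {
        try {
            if (simulateFatal) {
                throw new FatalStateException("snapshot dropped mid-flush");
            }
            return true; // flush succeeded
        } catch (FatalStateException ex) {
            abort("Replay of WAL required. Forcing server shutdown", ex);
            throw new ServiceException(ex);   // still surface the failure to the caller
        } catch (IOException ie) {
            throw new ServiceException(ie);   // ordinary failure: just wrap it
        }
    }

    public static void main(String[] args) throws Exception {
        System.out.println(flush(false));
        try {
            flush(true);
        } catch (ServiceException se) {
            System.out.println("caller sees: " + se.getCause().getMessage());
        }
    }
}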

Example 40 with ServiceException

use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in project hbase by apache.

the class RSRpcServices method bulkLoadHFile.

/**
   * Atomically bulk load several HFiles into an open region
   * @return true if successful, false if failed but recoverable (no action taken)
   * @throws ServiceException if failed unrecoverably
   */
@Override
public BulkLoadHFileResponse bulkLoadHFile(final RpcController controller, final BulkLoadHFileRequest request) throws ServiceException {
    long start = EnvironmentEdgeManager.currentTime();
    try {
        checkOpen();
        requestCount.increment();
        Region region = getRegion(request.getRegion());
        boolean bypass = false;
        boolean loaded = false;
        Map<byte[], List<Path>> map = null;
        if (!request.hasBulkToken()) {
            // Old style bulk load. This will not be supported in future releases
            List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
            for (FamilyPath familyPath : request.getFamilyPathList()) {
                familyPaths.add(new Pair<>(familyPath.getFamily().toByteArray(), familyPath.getPath()));
            }
            if (region.getCoprocessorHost() != null) {
                bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
            }
            try {
                if (!bypass) {
                    map = region.bulkLoadHFiles(familyPaths, request.getAssignSeqNum(), null, request.getCopyFile());
                    if (map != null) {
                        loaded = true;
                    }
                }
            } finally {
                if (region.getCoprocessorHost() != null) {
                    loaded = region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map, loaded);
                }
            }
        } else {
            // secure bulk load
            map = regionServer.secureBulkLoadManager.secureBulkLoadHFiles(region, request);
        }
        BulkLoadHFileResponse.Builder builder = BulkLoadHFileResponse.newBuilder();
        if (map != null) {
            loaded = true;
        }
        builder.setLoaded(loaded);
        return builder.build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    } finally {
        if (regionServer.metricsRegionServer != null) {
            regionServer.metricsRegionServer.updateBulkLoad(EnvironmentEdgeManager.currentTime() - start);
        }
    }
}
Also used : FamilyPath(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest.FamilyPath) ArrayList(java.util.ArrayList) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) BulkLoadHFileResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileResponse) ArrayList(java.util.ArrayList) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) NameInt64Pair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameInt64Pair) NameBytesPair(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair)
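
The old-style bulk load path above brackets the load with coprocessor hooks: the pre-hook may bypass the load entirely, and the post-hook runs in a finally block and gets the last word on the reported result. A minimal sketch of that hook shape with no HBase dependencies; BulkLoadHookPattern, Hook and the method names are illustrative only.

import java.io.IOException;
import java.util.Arrays;
import java.util.List;

public class BulkLoadHookPattern {
    interface Hook {
        boolean preBulkLoad(List<String> paths) throws IOException;           // true = bypass the load
        boolean postBulkLoad(List<String> paths, boolean loaded) throws IOException;
    }

    // Run the load, letting the hook veto it beforehand and adjust the result afterwards.
    static boolean load(List<String> paths, Hook hook) throws IOException {
        boolean bypass = false;
        boolean loaded = false;
        if (hook != null) {
            bypass = hook.preBulkLoad(paths);
        }
        try {
            if (!bypass) {
                loaded = true;   // stand-in for the actual HFile load
            }
        } finally {
            if (hook != null) {
                loaded = hook.postBulkLoad(paths, loaded);  // post-hook always runs
            }
        }
        return loaded;
    }

    public static void main(String[] args) throws IOException {
        List<String> paths = Arrays.asList("/hfiles/f1", "/hfiles/f2");
        Hook audit = new Hook() {
            public boolean preBulkLoad(List<String> p) { return false; }
            public boolean postBulkLoad(List<String> p, boolean ok) {
                System.out.println("loaded=" + ok + " paths=" + p);
                return ok;
            }
        };
        System.out.println(load(paths, audit));
    }
}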

Aggregations

ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException)95 IOException (java.io.IOException)76 DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException)65 InterruptedIOException (java.io.InterruptedIOException)28 HBaseIOException (org.apache.hadoop.hbase.HBaseIOException)24 ServerName (org.apache.hadoop.hbase.ServerName)16 QosPriority (org.apache.hadoop.hbase.ipc.QosPriority)15 HRegionInfo (org.apache.hadoop.hbase.HRegionInfo)14 ArrayList (java.util.ArrayList)12 HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController)12 TableName (org.apache.hadoop.hbase.TableName)10 ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString)10 Test (org.junit.Test)9 CellScanner (org.apache.hadoop.hbase.CellScanner)5 UnknownRegionException (org.apache.hadoop.hbase.UnknownRegionException)4 ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection)4 Result (org.apache.hadoop.hbase.client.Result)4 ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException)4 RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext)4 OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota)4