
Example 31 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in the hbase project by Apache.

From the class ServerManager, method sendFavoredNodes.

public void sendFavoredNodes(final ServerName server, Map<HRegionInfo, List<ServerName>> favoredNodes) throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
        LOG.warn("Attempting to send favored nodes update rpc to server " + server.toString() + " failed because no RPC connection found to this server");
    } else {
        List<Pair<HRegionInfo, List<ServerName>>> regionUpdateInfos = new ArrayList<>();
        for (Entry<HRegionInfo, List<ServerName>> entry : favoredNodes.entrySet()) {
            regionUpdateInfos.add(new Pair<>(entry.getKey(), entry.getValue()));
        }
        UpdateFavoredNodesRequest request = RequestConverter.buildUpdateFavoredNodesRequest(regionUpdateInfos);
        try {
            admin.updateFavoredNodes(null, request);
        } catch (ServiceException se) {
            throw ProtobufUtil.getRemoteException(se);
        }
    }
}
Also used: HRegionInfo(org.apache.hadoop.hbase.HRegionInfo) UpdateFavoredNodesRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest) AdminService(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) ServerName(org.apache.hadoop.hbase.ServerName) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair)
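
The catch block above is the standard idiom for master-to-regionserver RPCs: the blocking protobuf stub declares ServiceException, which is unwrapped back into the remote IOException via ProtobufUtil.getRemoteException before being rethrown. As an illustration of how a caller might drive this method, here is a minimal sketch; serverManager, favoredNodesByServer and LOG are assumed names from a hypothetical surrounding class, not part of the example above.

for (Map.Entry<ServerName, Map<HRegionInfo, List<ServerName>>> entry : favoredNodesByServer.entrySet()) {
    try {
        // sendFavoredNodes already converts the stub's ServiceException into an IOException
        serverManager.sendFavoredNodes(entry.getKey(), entry.getValue());
    } catch (IOException e) {
        // hypothetical handling: log and move on to the next server
        LOG.warn("Failed to push favored nodes to " + entry.getKey(), e);
    }
}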

Example 32 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in the hbase project by Apache.

From the class ServerManager, method sendRegionOpen.

// RPC methods to region servers
// RPC methods to region servers
/**
   * Sends an OPEN RPC to the specified server to open the specified region.
   * <p>
   * Open should not fail but can if the server just crashed.
   * <p>
   * @param server server to open a region on
   * @param region region to open
   * @param favoredNodes favored nodes for the region being opened
   */
public RegionOpeningState sendRegionOpen(final ServerName server, HRegionInfo region, List<ServerName> favoredNodes) throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
        throw new IOException("Attempting to send OPEN RPC to server " + server.toString() + " failed because no RPC connection found to this server");
    }
    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, region, favoredNodes, (RecoveryMode.LOG_REPLAY == this.master.getMasterWalManager().getLogRecoveryMode()));
    try {
        OpenRegionResponse response = admin.openRegion(null, request);
        return ResponseConverter.getRegionOpeningState(response);
    } catch (ServiceException se) {
        checkForRSznode(server, se);
        throw ProtobufUtil.getRemoteException(se);
    }
}
Also used: OpenRegionRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest) OpenRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse) AdminService(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) IOException(java.io.IOException)
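
The catch block shows the two usual steps when a blocking stub call fails: inspect the ServiceException first (checkForRSznode looks for a dead-server condition), then unwrap it with ProtobufUtil.getRemoteException. Conceptually the unwrapping looks roughly like the sketch below; this is an illustrative re-implementation under the name toRemoteIOException, not the actual ProtobufUtil source.

// Illustrative only: unwrap a shaded ServiceException into the remote IOException.
static IOException toRemoteIOException(ServiceException se) {
    Throwable cause = se.getCause();
    if (cause instanceof IOException) {
        // preserve the original remote exception type (e.g. DoNotRetryIOException)
        return (IOException) cause;
    }
    // fall back to wrapping the ServiceException itself
    return new IOException(se);
}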

Example 33 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in the hbase project by Apache.

From the class ServerManager, method sendRegionOpen.

/**
   * Sends an OPEN RPC to the specified server to open the specified regions.
   * <p>
   * Open should not fail but can if the server just crashed.
   * <p>
   * @param server server to open the regions on
   * @param regionOpenInfos info of a list of regions to open
   * @return a list of region opening states
   */
public List<RegionOpeningState> sendRegionOpen(ServerName server, List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos) throws IOException {
    AdminService.BlockingInterface admin = getRsAdmin(server);
    if (admin == null) {
        throw new IOException("Attempting to send OPEN RPC to server " + server.toString() + " failed because no RPC connection found to this server");
    }
    OpenRegionRequest request = RequestConverter.buildOpenRegionRequest(server, regionOpenInfos, (RecoveryMode.LOG_REPLAY == this.master.getMasterWalManager().getLogRecoveryMode()));
    try {
        OpenRegionResponse response = admin.openRegion(null, request);
        return ResponseConverter.getRegionOpeningStateList(response);
    } catch (ServiceException se) {
        checkForRSznode(server, se);
        throw ProtobufUtil.getRemoteException(se);
    }
}
Also used: OpenRegionRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionRequest) OpenRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.OpenRegionResponse) AdminService(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.AdminService) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) IOException(java.io.IOException)
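
This overload batches several regions into one OPEN RPC. A hypothetical caller might assemble the request like the sketch below; regionsToOpen, plan and server are illustrative names, and plan.getFavoredNodes is assumed to return the favored-node list (possibly null) for a region.

List<Pair<HRegionInfo, List<ServerName>>> regionOpenInfos = new ArrayList<>();
for (HRegionInfo hri : regionsToOpen) {
    // pair each region with its favored nodes, if any
    regionOpenInfos.add(new Pair<>(hri, plan.getFavoredNodes(hri)));
}
// one RPC opens all regions; the returned list is in the same order as the request
List<RegionOpeningState> states = serverManager.sendRegionOpen(server, regionOpenInfos);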

Example 34 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in the hbase project by Apache.

From the class RSRpcServices, method cleanupBulkLoad.

@Override
public CleanupBulkLoadResponse cleanupBulkLoad(RpcController controller, CleanupBulkLoadRequest request) throws ServiceException {
    try {
        checkOpen();
        requestCount.increment();
        Region region = getRegion(request.getRegion());
        regionServer.secureBulkLoadManager.cleanupBulkLoad(region, request);
        CleanupBulkLoadResponse response = CleanupBulkLoadResponse.newBuilder().build();
        return response;
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
}
Also used: ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) CleanupBulkLoadResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.CleanupBulkLoadResponse)
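
This example is the mirror image of the client-side ones: inside RSRpcServices every RPC implementation follows the same skeleton of checkOpen(), a request-counter increment, the actual work, and wrapping any IOException into the ServiceException that the protobuf service contract requires. A minimal sketch of that skeleton for a hypothetical RPC method follows; DoSomethingRequest, DoSomethingResponse and doSomething are placeholders, not HBase APIs.

@Override
public DoSomethingResponse doSomething(RpcController controller, DoSomethingRequest request) throws ServiceException {
    try {
        // fail fast if the region server is stopping or not yet online
        checkOpen();
        // per-server request metrics
        requestCount.increment();
        // ... the actual operation would go here ...
        return DoSomethingResponse.newBuilder().build();
    } catch (IOException ie) {
        // the protobuf service contract only allows ServiceException, so wrap the cause
        throw new ServiceException(ie);
    }
}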

Example 35 with ServiceException

Use of org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException in the hbase project by Apache.

From the class RSRpcServices, method scan.

/**
   * Scan data in a table.
   *
   * @param controller the RPC controller
   * @param request the scan request
   * @throws ServiceException
   */
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request) throws ServiceException {
    if (controller != null && !(controller instanceof HBaseRpcController)) {
        throw new UnsupportedOperationException("We only do " + "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
    }
    if (!request.hasScannerId() && !request.hasScan()) {
        throw new ServiceException(new DoNotRetryIOException("Missing required input: scannerId or scan"));
    }
    try {
        checkOpen();
    } catch (IOException e) {
        if (request.hasScannerId()) {
            String scannerName = Long.toString(request.getScannerId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Server shutting down and client tried to access missing scanner " + scannerName);
            }
            if (regionServer.leases != null) {
                try {
                    regionServer.leases.cancelLease(scannerName);
                } catch (LeaseException le) {
                    // No problem, ignore
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
                    }
                }
            }
        }
        throw new ServiceException(e);
    }
    requestCount.increment();
    rpcScanRequestCount.increment();
    RegionScannerHolder rsh;
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    try {
        if (request.hasScannerId()) {
            rsh = getRegionScanner(request);
        } else {
            rsh = newRegionScanner(request, builder);
        }
    } catch (IOException e) {
        if (e == SCANNER_ALREADY_CLOSED) {
            // The scanner has already been closed, but the old client will still send a close request to us. Just ignore it and return.
            return builder.build();
        }
        throw new ServiceException(e);
    }
    Region region = rsh.r;
    String scannerName = rsh.scannerName;
    Leases.Lease lease;
    try {
        // Remove lease while its being processed in server; protects against case
        // where processing of request takes > lease expiration time.
        lease = regionServer.leases.removeLease(scannerName);
    } catch (LeaseException e) {
        throw new ServiceException(e);
    }
    if (request.hasRenew() && request.getRenew()) {
        // add back and return
        addScannerLeaseBack(lease);
        try {
            checkScanNextCallSeq(request, rsh);
        } catch (OutOfOrderScannerNextException e) {
            throw new ServiceException(e);
        }
        return builder.build();
    }
    OperationQuota quota;
    try {
        quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
    } catch (IOException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    try {
        checkScanNextCallSeq(request, rsh);
    } catch (OutOfOrderScannerNextException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    // Now we have increased the next call sequence. If we give the client an error, the retry will
    // never succeed. So we'd better close the scanner and return a DoNotRetryIOException to the client,
    // and then the client will try to open a new scanner.
    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
    // this is scan.getCaching
    int rows;
    if (request.hasNumberOfRows()) {
        rows = request.getNumberOfRows();
    } else {
        rows = closeScanner ? 0 : 1;
    }
    RpcCallContext context = RpcServer.getCurrentCall();
    // now let's do the real scan.
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    RegionScanner scanner = rsh.s;
    boolean moreResults = true;
    boolean moreResultsInRegion = true;
    // this is the limit of rows for this scan; if the number of rows reaches this value, we will
    // close the scanner.
    int limitOfRows;
    if (request.hasLimitOfRows()) {
        limitOfRows = request.getLimitOfRows();
        rows = Math.min(rows, limitOfRows);
    } else {
        limitOfRows = -1;
    }
    MutableObject lastBlock = new MutableObject();
    boolean scannerClosed = false;
    try {
        List<Result> results = new ArrayList<>();
        if (rows > 0) {
            boolean done = false;
            // Call coprocessor. Get region info from scanner.
            if (region.getCoprocessorHost() != null) {
                Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
                if (!results.isEmpty()) {
                    for (Result r : results) {
                        lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
                    }
                }
                if (bypass != null && bypass.booleanValue()) {
                    done = true;
                }
            }
            if (!done) {
                moreResultsInRegion = scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, results, builder, lastBlock, context);
            }
        }
        quota.addScanResult(results);
        if (scanner.isFilterDone() && results.isEmpty()) {
            // If the scanner's filter (if any) is done with the scan,
            // only set moreResults to false if the results list is empty. This is used to keep compatible
            // with the old scan implementation where we just ignore the returned results if moreResults
            // is false. Can remove the isEmpty check after we get rid of the old implementation.
            moreResults = false;
        } else if (limitOfRows > 0 && !results.isEmpty() && !results.get(results.size() - 1).mayHaveMoreCellsInRow() && ConnectionUtils.numberOfIndividualRows(results) >= limitOfRows) {
            // if we have reached the limit of rows
            moreResults = false;
        }
        addResults(builder, results, (HBaseRpcController) controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()), isClientCellBlockSupport(context));
        if (!moreResults || !moreResultsInRegion || closeScanner) {
            scannerClosed = true;
            closeScanner(region, scanner, scannerName, context);
        }
        builder.setMoreResults(moreResults);
        return builder.build();
    } catch (Exception e) {
        try {
            // scanner is closed here
            scannerClosed = true;
            // The scanner state might be left in a dirty state, so we will tell the Client to
            // fail this RPC and close the scanner while opening up another one from the start of
            // row that the client has last seen.
            closeScanner(region, scanner, scannerName, context);
            // If it is a DoNotRetryIOException, rethrow it as-is so it reaches the client. See ClientScanner code to see how it deals with these special exceptions.
            if (e instanceof DoNotRetryIOException) {
                throw e;
            }
            // Wrap a FileNotFoundException in a DoNotRetryIOException. This can avoid the retry in ClientScanner.
            if (e instanceof FileNotFoundException) {
                throw new DoNotRetryIOException(e);
            }
            // Otherwise, throw a special exception that resets the client scanner state, saving an extra RPC.
            if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
                // 1.4.0+ clients know how to handle
                throw new ScannerResetException("Scanner is closed on the server-side", e);
            } else {
                // older clients do not know about SRE. Just throw USE, which they will handle
                throw new UnknownScannerException("Throwing UnknownScannerException to reset the client" + " scanner state for clients older than 1.3.", e);
            }
        } catch (IOException ioe) {
            throw new ServiceException(ioe);
        }
    } finally {
        if (!scannerClosed) {
            // the closeCallBack will be set in closeScanner so here we only care about shippedCallback
            if (context != null) {
                context.setCallBack(rsh.shippedCallback);
            } else {
                // When context != null, adding back the lease will be done in callback set above.
                addScannerLeaseBack(lease);
            }
        }
        quota.close();
    }
}
Also used: DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) MutableObject(org.apache.commons.lang.mutable.MutableObject) RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) ScanResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) Lease(org.apache.hadoop.hbase.regionserver.Leases.Lease) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) UnknownHostException(java.net.UnknownHostException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) KeeperException(org.apache.zookeeper.KeeperException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController)
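
One detail worth calling out in the long catch block is the version check near the end: clients at 1.4.0 or newer understand ScannerResetException and can resume from the last row they saw, while older clients only know UnknownScannerException and will reopen the scanner. Pulled out as a standalone helper it would look roughly like the sketch below; toClientException is an illustrative name, not an HBase method.

// Illustrative helper: choose the exception type a client of a given version can handle.
static IOException toClientException(HBaseProtos.VersionInfo clientVersion, Exception cause) {
    if (VersionInfoUtil.hasMinimumVersion(clientVersion, 1, 4)) {
        // 1.4.0+ clients reset the scanner and continue from the last seen row
        return new ScannerResetException("Scanner is closed on the server-side", cause);
    }
    // older clients only understand UnknownScannerException; they will reopen the scanner
    return new UnknownScannerException("Resetting client scanner state", cause);
}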

Aggregations

ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) 95
IOException (java.io.IOException) 76
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException) 65
InterruptedIOException (java.io.InterruptedIOException) 28
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException) 24
ServerName (org.apache.hadoop.hbase.ServerName) 16
QosPriority (org.apache.hadoop.hbase.ipc.QosPriority) 15
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo) 14
ArrayList (java.util.ArrayList) 12
HBaseRpcController (org.apache.hadoop.hbase.ipc.HBaseRpcController) 12
TableName (org.apache.hadoop.hbase.TableName) 10
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) 10
Test (org.junit.Test) 9
CellScanner (org.apache.hadoop.hbase.CellScanner) 5
UnknownRegionException (org.apache.hadoop.hbase.UnknownRegionException) 4
ClusterConnection (org.apache.hadoop.hbase.client.ClusterConnection) 4
Result (org.apache.hadoop.hbase.client.Result) 4
ForeignException (org.apache.hadoop.hbase.errorhandling.ForeignException) 4
RpcCallContext (org.apache.hadoop.hbase.ipc.RpcCallContext) 4
OperationQuota (org.apache.hadoop.hbase.quotas.OperationQuota) 4