Example 26 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

In class RSRpcServices, method scan:

/**
   * Scan data in a table.
   *
   * @param controller the RPC controller
   * @param request the scan request
   * @return the scan response
   * @throws ServiceException if the scan fails
   */
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request) throws ServiceException {
    if (controller != null && !(controller instanceof HBaseRpcController)) {
        throw new UnsupportedOperationException("We only do HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
    }
    if (!request.hasScannerId() && !request.hasScan()) {
        throw new ServiceException(new DoNotRetryIOException("Missing required input: scannerId or scan"));
    }
    try {
        checkOpen();
    } catch (IOException e) {
        if (request.hasScannerId()) {
            String scannerName = Long.toString(request.getScannerId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Server shutting down and client tried to access missing scanner " + scannerName);
            }
            if (regionServer.leases != null) {
                try {
                    regionServer.leases.cancelLease(scannerName);
                } catch (LeaseException le) {
                    // No problem, ignore
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
                    }
                }
            }
        }
        throw new ServiceException(e);
    }
    requestCount.increment();
    rpcScanRequestCount.increment();
    RegionScannerHolder rsh;
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    try {
        if (request.hasScannerId()) {
            rsh = getRegionScanner(request);
        } else {
            rsh = newRegionScanner(request, builder);
        }
    } catch (IOException e) {
        if (e == SCANNER_ALREADY_CLOSED) {
            // Now the server closes the scanner automatically when there are no more results
            // for this region, but an old client will still send a close request to us.
            // Just ignore it and return.
            return builder.build();
        }
        throw new ServiceException(e);
    }
    Region region = rsh.r;
    String scannerName = rsh.scannerName;
    Leases.Lease lease;
    try {
        // Remove lease while its being processed in server; protects against case
        // where processing of request takes > lease expiration time.
        lease = regionServer.leases.removeLease(scannerName);
    } catch (LeaseException e) {
        throw new ServiceException(e);
    }
    if (request.hasRenew() && request.getRenew()) {
        // add back and return
        addScannerLeaseBack(lease);
        try {
            checkScanNextCallSeq(request, rsh);
        } catch (OutOfOrderScannerNextException e) {
            throw new ServiceException(e);
        }
        return builder.build();
    }
    OperationQuota quota;
    try {
        quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
    } catch (IOException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    try {
        checkScanNextCallSeq(request, rsh);
    } catch (OutOfOrderScannerNextException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    // Now we have increased the next call sequence. If we give client an error, the retry will
    // never success. So we'd better close the scanner and return a DoNotRetryIOException to client
    // and then client will try to open a new scanner.
    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
    // Number of rows to fetch in this call; this is the scan's caching value.
    int rows;
    if (request.hasNumberOfRows()) {
        rows = request.getNumberOfRows();
    } else {
        rows = closeScanner ? 0 : 1;
    }
    RpcCallContext context = RpcServer.getCurrentCall();
    // now let's do the real scan.
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    RegionScanner scanner = rsh.s;
    boolean moreResults = true;
    boolean moreResultsInRegion = true;
    // This is the row limit for this scan; once the number of returned rows reaches this
    // value, we will close the scanner.
    int limitOfRows;
    if (request.hasLimitOfRows()) {
        limitOfRows = request.getLimitOfRows();
        rows = Math.min(rows, limitOfRows);
    } else {
        limitOfRows = -1;
    }
    MutableObject lastBlock = new MutableObject();
    boolean scannerClosed = false;
    try {
        List<Result> results = new ArrayList<>();
        if (rows > 0) {
            boolean done = false;
            // Call coprocessor. Get region info from scanner.
            if (region.getCoprocessorHost() != null) {
                Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
                if (!results.isEmpty()) {
                    for (Result r : results) {
                        lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
                    }
                }
                if (bypass != null && bypass.booleanValue()) {
                    done = true;
                }
            }
            if (!done) {
                moreResultsInRegion = scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, results, builder, lastBlock, context);
            }
        }
        quota.addScanResult(results);
        if (scanner.isFilterDone() && results.isEmpty()) {
            // If the scanner's filter (if any) is done with the scan, only set moreResults to
            // false when the result list is empty. This keeps compatibility with the old scan
            // implementation, where the returned results are simply ignored when moreResults
            // is false. The isEmpty check can be removed once the old implementation is gone.
            moreResults = false;
        } else if (limitOfRows > 0 && !results.isEmpty() && !results.get(results.size() - 1).mayHaveMoreCellsInRow() && ConnectionUtils.numberOfIndividualRows(results) >= limitOfRows) {
            // if we have reached the limit of rows
            moreResults = false;
        }
        addResults(builder, results, (HBaseRpcController) controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()), isClientCellBlockSupport(context));
        if (!moreResults || !moreResultsInRegion || closeScanner) {
            scannerClosed = true;
            closeScanner(region, scanner, scannerName, context);
        }
        builder.setMoreResults(moreResults);
        return builder.build();
    } catch (Exception e) {
        try {
            // scanner is closed here
            scannerClosed = true;
            // The scanner state might be left in a dirty state, so we will tell the Client to
            // fail this RPC and close the scanner while opening up another one from the start of
            // row that the client has last seen.
            closeScanner(region, scanner, scannerName, context);
            // If it is already a DoNotRetryIOException, rethrow it as-is so the client scanner
            // gives up instead of retrying. See the ClientScanner code for how it handles
            // these special exceptions.
            if (e instanceof DoNotRetryIOException) {
                throw e;
            }
            // Wrap a FileNotFoundException as a DoNotRetryIOException to avoid the retry in
            // ClientScanner.
            if (e instanceof FileNotFoundException) {
                throw new DoNotRetryIOException(e);
            }
            // The scanner is already closed here. Instead of throwing the IOException, and
            // having the client retry with the same scanner id only to get an
            // UnknownScannerException on the next RPC, throw a special exception to save
            // that RPC.
            if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
                // 1.4.0+ clients know how to handle
                throw new ScannerResetException("Scanner is closed on the server-side", e);
            } else {
                // older clients do not know about SRE. Just throw USE, which they will handle
                throw new UnknownScannerException("Throwing UnknownScannerException to reset the client" + " scanner state for clients older than 1.3.", e);
            }
        } catch (IOException ioe) {
            throw new ServiceException(ioe);
        }
    } finally {
        if (!scannerClosed) {
            // the closeCallBack will be set in closeScanner so here we only care about shippedCallback
            if (context != null) {
                context.setCallBack(rsh.shippedCallback);
            } else {
                // context is null here, so add the lease back directly; when context != null,
                // the shipped callback set above takes care of it.
                addScannerLeaseBack(lease);
            }
        }
        quota.close();
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) MutableObject(org.apache.commons.lang.mutable.MutableObject) RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) ScanResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) Lease(org.apache.hadoop.hbase.regionserver.Leases.Lease) FailedSanityCheckException(org.apache.hadoop.hbase.exceptions.FailedSanityCheckException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) ResultOrException(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ResultOrException) UnknownHostException(java.net.UnknownHostException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) BindException(java.net.BindException) DroppedSnapshotException(org.apache.hadoop.hbase.DroppedSnapshotException) KeeperException(org.apache.zookeeper.KeeperException) LeaseStillHeldException(org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException) ServerNotRunningYetException(org.apache.hadoop.hbase.ipc.ServerNotRunningYetException) ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController)
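
As a companion to this server-side method, the sketch below shows how a client might drive a scan and let a DoNotRetryIOException propagate instead of retrying. This is a minimal sketch, not code from the hbase project: the class ScanClientSketch and method scanTable are hypothetical, and an open Connection is assumed.

import java.io.IOException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanClientSketch {

    // Scan the whole table and print each row key. The client-side scanner retries
    // recoverable errors such as ScannerResetException on its own; a
    // DoNotRetryIOException is surfaced directly, since retrying cannot succeed.
    static void scanTable(Connection conn, TableName tableName) throws IOException {
        try (Table table = conn.getTable(tableName);
             ResultScanner scanner = table.getScanner(new Scan())) {
            for (Result r : scanner) {
                System.out.println(Bytes.toString(r.getRow()));
            }
        } catch (DoNotRetryIOException e) {
            // Rethrow so the caller can give up cleanly instead of looping.
            throw e;
        }
    }
}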

Example 27 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

In class RSRpcServices, method getRegion:

/**
   * Find the HRegion based on a region specifier
   *
   * @param regionSpecifier the region specifier
   * @return the corresponding region
   * @throws IOException if the region could not be found for the given specifier
   */
@VisibleForTesting
public Region getRegion(final RegionSpecifier regionSpecifier) throws IOException {
    ByteString value = regionSpecifier.getValue();
    RegionSpecifierType type = regionSpecifier.getType();
    switch(type) {
        case REGION_NAME:
            byte[] regionName = value.toByteArray();
            String encodedRegionName = HRegionInfo.encodeRegionName(regionName);
            return regionServer.getRegionByEncodedName(regionName, encodedRegionName);
        case ENCODED_REGION_NAME:
            return regionServer.getRegionByEncodedName(value.toStringUtf8());
        default:
            throw new DoNotRetryIOException("Unsupported region specifier type: " + type);
    }
}
Also used : RegionSpecifierType(org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) VisibleForTesting(com.google.common.annotations.VisibleForTesting)
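
For illustration, here is a minimal sketch (hypothetical class and method names) of building the RegionSpecifier that getRegion consumes, using the encoded-name variant. Only REGION_NAME and ENCODED_REGION_NAME are handled; any other type makes getRegion throw a DoNotRetryIOException.

import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.RegionSpecifier.RegionSpecifierType;

public class RegionSpecifierSketch {

    // Build a specifier that getRegion() resolves via getRegionByEncodedName().
    static RegionSpecifier byEncodedName(String encodedName) {
        return RegionSpecifier.newBuilder()
            .setType(RegionSpecifierType.ENCODED_REGION_NAME)
            .setValue(ByteString.copyFromUtf8(encodedName))
            .build();
    }
}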

Example 28 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

In class SecureBulkLoadManager, method secureBulkLoadHFiles:

public Map<byte[], List<Path>> secureBulkLoadHFiles(final Region region, final BulkLoadHFileRequest request) throws IOException {
    final List<Pair<byte[], String>> familyPaths = new ArrayList<>(request.getFamilyPathCount());
    for (ClientProtos.BulkLoadHFileRequest.FamilyPath el : request.getFamilyPathList()) {
        familyPaths.add(new Pair<>(el.getFamily().toByteArray(), el.getPath()));
    }
    Token userToken = null;
    if (userProvider.isHadoopSecurityEnabled()) {
        userToken = new Token(request.getFsToken().getIdentifier().toByteArray(), request.getFsToken().getPassword().toByteArray(), new Text(request.getFsToken().getKind()), new Text(request.getFsToken().getService()));
    }
    final String bulkToken = request.getBulkToken();
    User user = getActiveUser();
    final UserGroupInformation ugi = user.getUGI();
    if (userProvider.isHadoopSecurityEnabled()) {
        try {
            Token tok = TokenUtil.obtainToken(conn);
            if (tok != null) {
                boolean b = ugi.addToken(tok);
                LOG.debug("token added " + tok + " for user " + ugi + " return=" + b);
            }
        } catch (IOException ioe) {
            LOG.warn("unable to add token", ioe);
        }
    }
    if (userToken != null) {
        ugi.addToken(userToken);
    } else if (userProvider.isHadoopSecurityEnabled()) {
        // A null token is allowed to pass through in "simple" security mode (useful for
        // mini cluster testing); with Hadoop security enabled it is an error.
        throw new DoNotRetryIOException("User token cannot be null");
    }
    boolean bypass = false;
    if (region.getCoprocessorHost() != null) {
        bypass = region.getCoprocessorHost().preBulkLoadHFile(familyPaths);
    }
    boolean loaded = false;
    Map<byte[], List<Path>> map = null;
    try {
        if (!bypass) {
            // After this point the 'doAs' user holds two tokens: one for the source fs
            // ('request user'), another for the target fs (HBase region server principal).
            if (userProvider.isHadoopSecurityEnabled()) {
                FsDelegationToken targetfsDelegationToken = new FsDelegationToken(userProvider, "renewer");
                targetfsDelegationToken.acquireDelegationToken(fs);
                Token<?> targetFsToken = targetfsDelegationToken.getUserToken();
                if (targetFsToken != null && (userToken == null || !targetFsToken.getService().equals(userToken.getService()))) {
                    ugi.addToken(targetFsToken);
                }
            }
            map = ugi.doAs(new PrivilegedAction<Map<byte[], List<Path>>>() {

                @Override
                public Map<byte[], List<Path>> run() {
                    FileSystem fs = null;
                    try {
                        fs = FileSystem.get(conf);
                        for (Pair<byte[], String> el : familyPaths) {
                            Path stageFamily = new Path(bulkToken, Bytes.toString(el.getFirst()));
                            if (!fs.exists(stageFamily)) {
                                fs.mkdirs(stageFamily);
                                fs.setPermission(stageFamily, PERM_ALL_ACCESS);
                            }
                        }
                        // Call bulkLoadHFiles as the requesting user to enable access
                        // prior to staging.
                        return region.bulkLoadHFiles(familyPaths, true, new SecureBulkLoadListener(fs, bulkToken, conf), request.getCopyFile());
                    } catch (Exception e) {
                        LOG.error("Failed to complete bulk load", e);
                    }
                    return null;
                }
            });
            if (map != null) {
                loaded = true;
            }
        }
    } finally {
        if (region.getCoprocessorHost() != null) {
            region.getCoprocessorHost().postBulkLoadHFile(familyPaths, map, loaded);
        }
    }
    return map;
}
Also used : Path(org.apache.hadoop.fs.Path) User(org.apache.hadoop.hbase.security.User) BulkLoadHFileRequest(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.BulkLoadHFileRequest) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ArrayList(java.util.ArrayList) Token(org.apache.hadoop.security.token.Token) FsDelegationToken(org.apache.hadoop.hbase.security.token.FsDelegationToken) Text(org.apache.hadoop.io.Text) IOException(java.io.IOException) PrivilegedAction(java.security.PrivilegedAction) FileSystem(org.apache.hadoop.fs.FileSystem) List(java.util.List) Pair(org.apache.hadoop.hbase.util.Pair) UserGroupInformation(org.apache.hadoop.security.UserGroupInformation)
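
Client code normally reaches secureBulkLoadHFiles through the bulk load tool rather than by issuing the RPC itself. Below is a minimal sketch assuming HFiles already staged under /staging/hfiles for a table my_table (both hypothetical); the tool class sits in org.apache.hadoop.hbase.mapreduce in this code line, though its package differs across HBase versions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;

public class BulkLoadClientSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tableName = TableName.valueOf("my_table"); // hypothetical table
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin();
             Table table = conn.getTable(tableName);
             RegionLocator locator = conn.getRegionLocator(tableName)) {
            // The tool stages the files and drives the bulk load RPC, which on a
            // secure cluster is handled by SecureBulkLoadManager on the server.
            new LoadIncrementalHFiles(conf)
                .doBulkLoad(new Path("/staging/hfiles"), admin, table, locator);
        }
    }
}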

Example 29 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

In class RSRpcServices, method closeRegion:

/**
   * Close a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @return the close region response
   * @throws ServiceException if the close fails
   */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public CloseRegionResponse closeRegion(final RpcController controller, final CloseRegionRequest request) throws ServiceException {
    final ServerName sn = (request.hasDestinationServer() ? ProtobufUtil.toServerName(request.getDestinationServer()) : null);
    try {
        checkOpen();
        if (request.hasServerStartCode()) {
            // check that we are the same server that this RPC is intended for.
            long serverStartCode = request.getServerStartCode();
            if (regionServer.serverName.getStartcode() != serverStartCode) {
                throw new ServiceException(new DoNotRetryIOException("This RPC was intended for a " + "different server with startCode: " + serverStartCode + ", this server is: " + regionServer.serverName));
            }
        }
        final String encodedRegionName = ProtobufUtil.getRegionEncodedName(request.getRegion());
        requestCount.increment();
        if (sn == null) {
            LOG.info("Close " + encodedRegionName + " without moving");
        } else {
            LOG.info("Close " + encodedRegionName + ", moving to " + sn);
        }
        boolean closed = regionServer.closeRegion(encodedRegionName, false, sn);
        CloseRegionResponse.Builder builder = CloseRegionResponse.newBuilder().setClosed(closed);
        return builder.build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ServerName(org.apache.hadoop.hbase.ServerName) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) CloseRegionResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.CloseRegionResponse) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)
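
On the client side, a region close is usually requested through the Admin API rather than by calling this RPC directly. A minimal sketch using Admin.unassign, which asks the master to close (and reassign) the region; the region name is hypothetical:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CloseRegionSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            // The eventual region-server side of this request is the closeRegion RPC above.
            admin.unassign(Bytes.toBytes("myEncodedRegionName"), false); // hypothetical name
        }
    }
}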

Example 30 with DoNotRetryIOException

Use of org.apache.hadoop.hbase.DoNotRetryIOException in project hbase by apache.

In class RSRpcServices, method compactRegion:

/**
   * Compact a region on the region server.
   *
   * @param controller the RPC controller
   * @param request the request
   * @return the compact region response
   * @throws ServiceException if the compaction request fails
   */
@Override
@QosPriority(priority = HConstants.ADMIN_QOS)
public CompactRegionResponse compactRegion(final RpcController controller, final CompactRegionRequest request) throws ServiceException {
    try {
        checkOpen();
        requestCount.increment();
        Region region = getRegion(request.getRegion());
        region.startRegionOperation(Operation.COMPACT_REGION);
        LOG.info("Compacting " + region.getRegionInfo().getRegionNameAsString());
        boolean major = false;
        byte[] family = null;
        Store store = null;
        if (request.hasFamily()) {
            family = request.getFamily().toByteArray();
            store = region.getStore(family);
            if (store == null) {
                throw new ServiceException(new DoNotRetryIOException("column family " + Bytes.toString(family) + " does not exist in region " + region.getRegionInfo().getRegionNameAsString()));
            }
        }
        if (request.hasMajor()) {
            major = request.getMajor();
        }
        if (major) {
            if (family != null) {
                store.triggerMajorCompaction();
            } else {
                region.triggerMajorCompaction();
            }
        }
        String familyLogMsg = (family != null) ? " for column family: " + Bytes.toString(family) : "";
        if (LOG.isTraceEnabled()) {
            LOG.trace("User-triggered compaction requested for region " + region.getRegionInfo().getRegionNameAsString() + familyLogMsg);
        }
        String log = "User-triggered " + (major ? "major " : "") + "compaction" + familyLogMsg;
        if (family != null) {
            regionServer.compactSplitThread.requestCompaction(region, store, log, Store.PRIORITY_USER, null, RpcServer.getRequestUser());
        } else {
            regionServer.compactSplitThread.requestCompaction(region, log, Store.PRIORITY_USER, null, RpcServer.getRequestUser());
        }
        return CompactRegionResponse.newBuilder().build();
    } catch (IOException ie) {
        throw new ServiceException(ie);
    }
}
Also used : ServiceException(org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ByteString(org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString) InterruptedIOException(java.io.InterruptedIOException) IOException(java.io.IOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) QosPriority(org.apache.hadoop.hbase.ipc.QosPriority)
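
Client code usually reaches this RPC through the Admin compaction methods. A minimal sketch, assuming a table my_table with a column family cf (both hypothetical); asking for a family that does not exist surfaces as the DoNotRetryIOException thrown above:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class CompactRegionSketch {
    public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            TableName table = TableName.valueOf("my_table");
            // Minor-compact a single column family across the table's regions.
            admin.compact(table, Bytes.toBytes("cf"));
            // Or request a major compaction of the whole table.
            admin.majorCompact(table);
        }
    }
}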

Aggregations

DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 77 usages
IOException (java.io.IOException): 28 usages
Cell (org.apache.hadoop.hbase.Cell): 18 usages
ArrayList (java.util.ArrayList): 12 usages
ServiceException (org.apache.hadoop.hbase.shaded.com.google.protobuf.ServiceException): 12 usages
MutationType (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.MutationProto.MutationType): 12 usages
TableName (org.apache.hadoop.hbase.TableName): 11 usages
InterruptedIOException (java.io.InterruptedIOException): 10 usages
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 10 usages
Delete (org.apache.hadoop.hbase.client.Delete): 10 usages
Put (org.apache.hadoop.hbase.client.Put): 10 usages
Test (org.junit.Test): 10 usages
AccessDeniedException (org.apache.hadoop.hbase.security.AccessDeniedException): 9 usages
User (org.apache.hadoop.hbase.security.User): 8 usages
Mutation (org.apache.hadoop.hbase.client.Mutation): 7 usages
ByteString (org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString): 7 usages
HTableDescriptor (org.apache.hadoop.hbase.HTableDescriptor): 6 usages
NameBytesPair (org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos.NameBytesPair): 6 usages
ByteBufferCell (org.apache.hadoop.hbase.ByteBufferCell): 5 usages
HRegionInfo (org.apache.hadoop.hbase.HRegionInfo): 5 usages