
Example 1 with UnknownScannerException

Use of org.apache.hadoop.hbase.UnknownScannerException in project hbase by apache.

From the class ScannerResultGenerator, method next.

@Override
public Cell next() {
    // Return a cell that was read ahead earlier, if there is one.
    if (cache != null) {
        Cell kv = cache;
        cache = null;
        return kv;
    }
    boolean loop;
    do {
        loop = false;
        // Drain the iterator over the current row's cells first.
        if (rowI != null) {
            if (rowI.hasNext()) {
                return rowI.next();
            } else {
                rowI = null;
            }
        }
        // Use a previously cached Result if available, otherwise fetch the next
        // Result from the scanner, translating scanner failures into unchecked
        // exceptions for the caller.
        if (cached != null) {
            rowI = cached.listCells().iterator();
            loop = true;
            cached = null;
        } else {
            Result result = null;
            try {
                result = scanner.next();
            } catch (UnknownScannerException e) {
                // The server no longer knows this scanner, e.g. its lease expired.
                throw new IllegalArgumentException(e);
            } catch (TableNotEnabledException tnee) {
                throw new IllegalStateException(tnee);
            } catch (TableNotFoundException tnfe) {
                throw new IllegalArgumentException(tnfe);
            } catch (IOException e) {
                LOG.error(StringUtils.stringifyException(e));
            }
            if (result != null && !result.isEmpty()) {
                rowI = result.listCells().iterator();
                loop = true;
            }
        }
    } while (loop);
    return null;
}
Also used : TableNotFoundException(org.apache.hadoop.hbase.TableNotFoundException) IOException(java.io.IOException) Cell(org.apache.hadoop.hbase.Cell) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) Result(org.apache.hadoop.hbase.client.Result) TableNotEnabledException(org.apache.hadoop.hbase.TableNotEnabledException)
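
The pattern above translates scanner failures into unchecked exceptions so that callers of the cell iterator do not have to handle IOException themselves. Below is a minimal, self-contained sketch of the same translation around a plain client-side ResultScanner; the NextRowFetcher class is hypothetical and only illustrates the wrapping, it is not part of the HBase REST module.

import java.io.IOException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;

// Hypothetical helper mirroring ScannerResultGenerator.next(): checked scanner
// failures are rethrown as unchecked exceptions.
public class NextRowFetcher {
    private final ResultScanner scanner;

    public NextRowFetcher(ResultScanner scanner) {
        this.scanner = scanner;
    }

    public Result nextRow() {
        try {
            return scanner.next();
        } catch (UnknownScannerException e) {
            // The server no longer knows this scanner, e.g. because its lease expired,
            // so the handle the caller holds is no longer valid.
            throw new IllegalArgumentException(e);
        } catch (IOException e) {
            // Any other I/O failure is surfaced as an unchecked exception as well.
            throw new RuntimeException(e);
        }
    }
}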

Example 2 with UnknownScannerException

Use of org.apache.hadoop.hbase.UnknownScannerException in project hbase by apache.

From the class RSRpcServices, method scan.

/**
 * Scan data in a table.
 *
 * @param controller the RPC controller
 * @param request the scan request
 * @throws ServiceException
 */
@Override
public ScanResponse scan(final RpcController controller, final ScanRequest request) throws ServiceException {
    if (controller != null && !(controller instanceof HBaseRpcController)) {
        throw new UnsupportedOperationException("We only do " + "HBaseRpcControllers! FIX IF A PROBLEM: " + controller);
    }
    if (!request.hasScannerId() && !request.hasScan()) {
        throw new ServiceException(new DoNotRetryIOException("Missing required input: scannerId or scan"));
    }
    try {
        checkOpen();
    } catch (IOException e) {
        if (request.hasScannerId()) {
            String scannerName = toScannerName(request.getScannerId());
            if (LOG.isDebugEnabled()) {
                LOG.debug("Server shutting down and client tried to access missing scanner " + scannerName);
            }
            final LeaseManager leaseManager = server.getLeaseManager();
            if (leaseManager != null) {
                try {
                    leaseManager.cancelLease(scannerName);
                } catch (LeaseException le) {
                    // No problem, ignore
                    if (LOG.isTraceEnabled()) {
                        LOG.trace("Un-able to cancel lease of scanner. It could already be closed.");
                    }
                }
            }
        }
        throw new ServiceException(e);
    }
    requestCount.increment();
    rpcScanRequestCount.increment();
    RegionScannerHolder rsh;
    ScanResponse.Builder builder = ScanResponse.newBuilder();
    String scannerName;
    try {
        if (request.hasScannerId()) {
            // The downstream projects such as AsyncHBase in OpenTSDB need this value. See HBASE-18000
            // for more details.
            long scannerId = request.getScannerId();
            builder.setScannerId(scannerId);
            scannerName = toScannerName(scannerId);
            rsh = getRegionScanner(request);
        } else {
            Pair<String, RegionScannerHolder> scannerNameAndRSH = newRegionScanner(request, builder);
            scannerName = scannerNameAndRSH.getFirst();
            rsh = scannerNameAndRSH.getSecond();
        }
    } catch (IOException e) {
        if (e == SCANNER_ALREADY_CLOSED) {
            // Now we will close scanners automatically if there are no more results for the
            // region, but the old client will still send a close request to us. Just ignore it
            // and return.
            return builder.build();
        }
        throw new ServiceException(e);
    }
    if (rsh.fullRegionScan) {
        rpcFullScanRequestCount.increment();
    }
    HRegion region = rsh.r;
    LeaseManager.Lease lease;
    try {
        // Remove the lease while it is being processed in the server; this protects against the
        // case where processing of the request takes longer than the lease expiration time.
        lease = server.getLeaseManager().removeLease(scannerName);
    } catch (LeaseException e) {
        throw new ServiceException(e);
    }
    if (request.hasRenew() && request.getRenew()) {
        // add back and return
        addScannerLeaseBack(lease);
        try {
            checkScanNextCallSeq(request, rsh);
        } catch (OutOfOrderScannerNextException e) {
            throw new ServiceException(e);
        }
        return builder.build();
    }
    OperationQuota quota;
    try {
        quota = getRpcQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
    } catch (IOException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    try {
        checkScanNextCallSeq(request, rsh);
    } catch (OutOfOrderScannerNextException e) {
        addScannerLeaseBack(lease);
        throw new ServiceException(e);
    }
    // Now we have increased the next call sequence. If we give the client an error, the retry
    // will never succeed, so we'd better close the scanner and return a DoNotRetryIOException to
    // the client, which will then try to open a new scanner.
    boolean closeScanner = request.hasCloseScanner() ? request.getCloseScanner() : false;
    // this is scan.getCaching
    int rows;
    if (request.hasNumberOfRows()) {
        rows = request.getNumberOfRows();
    } else {
        rows = closeScanner ? 0 : 1;
    }
    RpcCallContext context = RpcServer.getCurrentCall().orElse(null);
    // now let's do the real scan.
    long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
    RegionScanner scanner = rsh.s;
    // This is the limit of rows for this scan; if the number of rows reaches this value, we will
    // close the scanner.
    int limitOfRows;
    if (request.hasLimitOfRows()) {
        limitOfRows = request.getLimitOfRows();
    } else {
        limitOfRows = -1;
    }
    MutableObject<Object> lastBlock = new MutableObject<>();
    boolean scannerClosed = false;
    try {
        List<Result> results = new ArrayList<>(Math.min(rows, 512));
        if (rows > 0) {
            boolean done = false;
            // Call coprocessor. Get region info from scanner.
            if (region.getCoprocessorHost() != null) {
                Boolean bypass = region.getCoprocessorHost().preScannerNext(scanner, results, rows);
                if (!results.isEmpty()) {
                    for (Result r : results) {
                        lastBlock.setValue(addSize(context, r, lastBlock.getValue()));
                    }
                }
                if (bypass != null && bypass.booleanValue()) {
                    done = true;
                }
            }
            if (!done) {
                scan((HBaseRpcController) controller, request, rsh, maxQuotaResultSize, rows, limitOfRows, results, builder, lastBlock, context);
            } else {
                builder.setMoreResultsInRegion(!results.isEmpty());
            }
        } else {
            // This is an open-scanner call with numberOfRows = 0, so set moreResultsInRegion to true.
            builder.setMoreResultsInRegion(true);
        }
        quota.addScanResult(results);
        addResults(builder, results, (HBaseRpcController) controller, RegionReplicaUtil.isDefaultReplica(region.getRegionInfo()), isClientCellBlockSupport(context));
        if (scanner.isFilterDone() && results.isEmpty()) {
            // If the scanner's filter - if any - is done with the scan, only set moreResults to
            // false if the results list is empty. This is used to stay compatible with the old
            // scan implementation, where we just ignore the returned results if moreResults is
            // false. We can remove the isEmpty check after we get rid of the old implementation.
            builder.setMoreResults(false);
        }
        // Later we may close the scanner depending on this flag, so here we need to make sure we
        // have already set this flag.
        assert builder.hasMoreResultsInRegion();
        // We only set moreResults to false in the code above, so set it to true if it has not
        // been set yet.
        if (!builder.hasMoreResults()) {
            builder.setMoreResults(true);
        }
        if (builder.getMoreResults() && builder.getMoreResultsInRegion() && !results.isEmpty()) {
            // Record the last cell of the last result if it is a partial result.
            // We need this to calculate the complete rows we have returned to the client, as
            // mayHaveMoreCellsInRow being true does not mean that there will be extra cells for
            // the current row. We may filter out all the remaining cells for the current row and
            // just return the cells of the next row when calling RegionScanner.nextRaw, so here
            // we need to check for a row change.
            Result lastResult = results.get(results.size() - 1);
            if (lastResult.mayHaveMoreCellsInRow()) {
                rsh.rowOfLastPartialResult = lastResult.getRow();
            } else {
                rsh.rowOfLastPartialResult = null;
            }
        }
        if (!builder.getMoreResults() || !builder.getMoreResultsInRegion() || closeScanner) {
            scannerClosed = true;
            closeScanner(region, scanner, scannerName, context);
        }
        return builder.build();
    } catch (IOException e) {
        try {
            // scanner is closed here
            scannerClosed = true;
            // The scanner state might be left in a dirty state, so we will tell the Client to
            // fail this RPC and close the scanner while opening up another one from the start of
            // row that the client has last seen.
            closeScanner(region, scanner, scannerName, context);
            // Below we convert the exception into one of the special exceptions that are known
            // to the client. See ClientScanner code to see how it deals with these special
            // exceptions.
            if (e instanceof DoNotRetryIOException) {
                throw e;
            }
            // Wrap a FileNotFoundException in a DoNotRetryIOException. This can avoid the retry
            // in ClientScanner.
            if (e instanceof FileNotFoundException) {
                throw new DoNotRetryIOException(e);
            }
            // The scanner has already been closed above. Instead of letting the client retry with
            // the same scanner id only to get an UnknownScannerException on its next call, throw
            // a special exception to save an RPC.
            if (VersionInfoUtil.hasMinimumVersion(context.getClientVersionInfo(), 1, 4)) {
                // 1.4.0+ clients know how to handle ScannerResetException.
                throw new ScannerResetException("Scanner is closed on the server-side", e);
            } else {
                // older clients do not know about SRE. Just throw USE, which they will handle
                throw new UnknownScannerException("Throwing UnknownScannerException to reset the client" + " scanner state for clients older than 1.3.", e);
            }
        } catch (IOException ioe) {
            throw new ServiceException(ioe);
        }
    } finally {
        if (!scannerClosed) {
            // the closeCallBack will be set in closeScanner so here we only care about shippedCallback
            if (context != null) {
                context.setCallBack(rsh.shippedCallback);
            } else {
                // When context != null, adding back the lease will be done in callback set above.
                addScannerLeaseBack(lease);
            }
        }
        quota.close();
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) ScannerResetException(org.apache.hadoop.hbase.exceptions.ScannerResetException) ArrayList(java.util.ArrayList) FileNotFoundException(java.io.FileNotFoundException) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) RegionActionResult(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.RegionActionResult) Result(org.apache.hadoop.hbase.client.Result) CheckAndMutateResult(org.apache.hadoop.hbase.client.CheckAndMutateResult) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) OutOfOrderScannerNextException(org.apache.hadoop.hbase.exceptions.OutOfOrderScannerNextException) MutableObject(org.apache.commons.lang3.mutable.MutableObject) RpcCallContext(org.apache.hadoop.hbase.ipc.RpcCallContext) Lease(org.apache.hadoop.hbase.regionserver.LeaseManager.Lease) ScanResponse(org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse) OperationQuota(org.apache.hadoop.hbase.quotas.OperationQuota) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) HBaseRpcController(org.apache.hadoop.hbase.ipc.HBaseRpcController) ServiceException(org.apache.hbase.thirdparty.com.google.protobuf.ServiceException) MutableObject(org.apache.commons.lang3.mutable.MutableObject)
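
On the client side, ClientScanner normally reacts to the ScannerResetException and UnknownScannerException thrown above by reopening the scanner transparently. The sketch below shows the equivalent handling at the application level, assuming an application that drives the scan itself and remembers the last row it processed; the class name RestartableScan and the row-counting logic are illustrative only, and there is no retry limit, so a persistent failure would loop forever.

import java.io.IOException;
import org.apache.hadoop.hbase.UnknownScannerException;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.exceptions.ScannerResetException;

public final class RestartableScan {

    // Count all rows of a table, restarting the scan from the last processed row
    // whenever the server-side scanner was reset or has expired.
    public static long countRows(Table table) throws IOException {
        long count = 0;
        byte[] lastRow = null;
        while (true) {
            Scan scan = new Scan();
            if (lastRow != null) {
                // Resume just after the last row that was fully processed.
                scan.withStartRow(lastRow, false);
            }
            try (ResultScanner scanner = table.getScanner(scan)) {
                Result r;
                while ((r = scanner.next()) != null) {
                    lastRow = r.getRow();
                    count++;
                }
                return count;
            } catch (UnknownScannerException | ScannerResetException e) {
                // The server closed or lost the scanner; open a new one from lastRow.
            }
        }
    }
}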

Example 3 with UnknownScannerException

Use of org.apache.hadoop.hbase.UnknownScannerException in project hbase by apache.

From the class RSRpcServices, method getRegionScanner.

private RegionScannerHolder getRegionScanner(ScanRequest request) throws IOException {
    String scannerName = toScannerName(request.getScannerId());
    RegionScannerHolder rsh = this.scanners.get(scannerName);
    if (rsh == null) {
        // Just ignore the next or close request if the scanner does not exist.
        if (closedScanners.getIfPresent(scannerName) != null) {
            throw SCANNER_ALREADY_CLOSED;
        } else {
            LOG.warn("Client tried to access missing scanner " + scannerName);
            throw new UnknownScannerException("Unknown scanner '" + scannerName + "'. This can happen due to any of the following " + "reasons: a) Scanner id given is wrong, b) Scanner lease expired because of " + "long wait between consecutive client checkins, c) Server may be closing down, " + "d) RegionServer restart during upgrade.\nIf the issue is due to reason (b), a " + "possible fix would be increasing the value of " + "'hbase.client.scanner.timeout.period' configuration.");
        }
    }
    rejectIfInStandByState(rsh.r);
    RegionInfo hri = rsh.s.getRegionInfo();
    // Yes, should be the same instance
    if (server.getOnlineRegion(hri.getRegionName()) != rsh.r) {
        String msg = "Region has changed on the scanner " + scannerName + ": regionName=" + hri.getRegionNameAsString() + ", scannerRegionName=" + rsh.r;
        LOG.warn(msg + ", closing...");
        scanners.remove(scannerName);
        try {
            rsh.s.close();
        } catch (IOException e) {
            LOG.warn("Getting exception closing " + scannerName, e);
        } finally {
            try {
                server.getLeaseManager().cancelLease(scannerName);
            } catch (LeaseException e) {
                LOG.warn("Getting exception closing " + scannerName, e);
            }
        }
        throw new NotServingRegionException(msg);
    }
    return rsh;
}
Also used : NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) RegionInfo(org.apache.hadoop.hbase.client.RegionInfo) ByteString(org.apache.hbase.thirdparty.com.google.protobuf.ByteString) IOException(java.io.IOException) DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) HBaseIOException(org.apache.hadoop.hbase.HBaseIOException) UncheckedIOException(java.io.UncheckedIOException) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException)
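
The exception message above names 'hbase.client.scanner.timeout.period' as the knob for case (b), a scanner lease that expires between client check-ins. A minimal sketch of raising it is shown below; the 120000 ms value is only an example, the property can equally be set in hbase-site.xml, and depending on the deployment it may also need to be raised on the region servers, since the server derives the scanner lease period from the same property in its own configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class LongScannerTimeoutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Example value only: allow up to 2 minutes between consecutive scanner
        // check-ins before the scanner lease is considered expired.
        conf.setInt("hbase.client.scanner.timeout.period", 120000);
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
            // Scanners opened from tables of this connection use the longer timeout.
        }
    }
}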

Example 4 with UnknownScannerException

Use of org.apache.hadoop.hbase.UnknownScannerException in project hbase by apache.

From the class TestReplicationKillRS, method loadTableAndKillRS.

/**
 * Load up 1 table over 2 region servers and kill a source during the upload. The failover
 * happens internally. WARNING this test sometimes fails because of HBASE-3515
 */
protected void loadTableAndKillRS(HBaseTestingUtil util) throws Exception {
    // Killing the RS with hbase:meta can result in failed puts until we solve
    // IO fencing.
    int rsToKill1 = util.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
    // Takes about 20 secs to run the full loading, kill around the middle
    Thread killer = killARegionServer(util, 5000, rsToKill1);
    Result[] res;
    int initialCount;
    try (Connection conn = ConnectionFactory.createConnection(CONF1)) {
        try (Table table = conn.getTable(tableName)) {
            LOG.info("Start loading table");
            initialCount = UTIL1.loadTable(table, famName);
            LOG.info("Done loading table");
            killer.join(5000);
            LOG.info("Done waiting for threads");
            while (true) {
                try (ResultScanner scanner = table.getScanner(new Scan())) {
                    res = scanner.next(initialCount);
                    break;
                } catch (UnknownScannerException ex) {
                    LOG.info("Cluster wasn't ready yet, restarting scanner");
                }
            }
        }
    }
    // Some rows may have been lost during the region server kill because we
    // don't have IO fencing.
    if (res.length != initialCount) {
        LOG.warn("We lost some rows on the master cluster!");
        // We don't really expect the other cluster to have more rows
        initialCount = res.length;
    }
    int lastCount = 0;
    final long start = EnvironmentEdgeManager.currentTime();
    int i = 0;
    try (Connection conn = ConnectionFactory.createConnection(CONF2)) {
        try (Table table = conn.getTable(tableName)) {
            while (true) {
                if (i == NB_RETRIES - 1) {
                    fail("Waited too much time for queueFailover replication. " + "Waited " + (EnvironmentEdgeManager.currentTime() - start) + "ms.");
                }
                Result[] res2;
                try (ResultScanner scanner = table.getScanner(new Scan())) {
                    res2 = scanner.next(initialCount * 2);
                }
                if (res2.length < initialCount) {
                    if (lastCount < res2.length) {
                        // Don't increment timeout if we make progress
                        i--;
                    } else {
                        i++;
                    }
                    lastCount = res2.length;
                    LOG.info("Only got " + lastCount + " rows instead of " + initialCount + " current i=" + i);
                    Thread.sleep(SLEEP_TIME * 2);
                } else {
                    break;
                }
            }
        }
    }
}
Also used : Table(org.apache.hadoop.hbase.client.Table) ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Connection(org.apache.hadoop.hbase.client.Connection) Scan(org.apache.hadoop.hbase.client.Scan) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) Result(org.apache.hadoop.hbase.client.Result)
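
The test above simply retries the whole scan until the restarted region server is serving requests again. A small generic helper capturing that retry-on-UnknownScannerException pattern might look like the sketch below; the class name, the fixed pause, and the attempt limit are illustrative and not part of the test. In the test, the Callable would open a fresh ResultScanner and call next(initialCount) on it.

import java.io.IOException;
import java.util.concurrent.Callable;
import org.apache.hadoop.hbase.UnknownScannerException;

public final class ScanRetries {

    // Re-run a scan-producing callable while the cluster is still settling and the
    // scan is rejected with UnknownScannerException.
    public static <T> T retryOnUnknownScanner(Callable<T> scanAttempt, int maxAttempts,
            long pauseMillis) throws Exception {
        UnknownScannerException last = null;
        for (int attempt = 0; attempt < maxAttempts; attempt++) {
            try {
                return scanAttempt.call();
            } catch (UnknownScannerException e) {
                last = e;
                Thread.sleep(pauseMillis);
            }
        }
        throw new IOException("Scan still failing after " + maxAttempts + " attempts", last);
    }
}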

Example 5 with UnknownScannerException

Use of org.apache.hadoop.hbase.UnknownScannerException in project hbase by apache.

From the class TestReplicationKillRS, method loadTableAndKillRS (an older variant that uses HBaseTestingUtility).

/**
   * Load up 1 table over 2 region servers and kill a source during
   * the upload. The failover happens internally.
   *
   * WARNING this test sometimes fails because of HBASE-3515
   *
   * @throws Exception
   */
public void loadTableAndKillRS(HBaseTestingUtility util) throws Exception {
    // Killing the RS with hbase:meta can result in failed puts until we solve
    // IO fencing.
    int rsToKill1 = util.getHBaseCluster().getServerWithMeta() == 0 ? 1 : 0;
    // Takes about 20 secs to run the full loading, kill around the middle
    Thread killer = killARegionServer(util, 5000, rsToKill1);
    LOG.info("Start loading table");
    int initialCount = utility1.loadTable(htable1, famName);
    LOG.info("Done loading table");
    killer.join(5000);
    LOG.info("Done waiting for threads");
    Result[] res;
    while (true) {
        try {
            Scan scan = new Scan();
            ResultScanner scanner = htable1.getScanner(scan);
            res = scanner.next(initialCount);
            scanner.close();
            break;
        } catch (UnknownScannerException ex) {
            LOG.info("Cluster wasn't ready yet, restarting scanner");
        }
    }
    // Some rows may have been lost during the region server kill because we
    // don't have IO fencing.
    if (res.length != initialCount) {
        LOG.warn("We lost some rows on the master cluster!");
        // We don't really expect the other cluster to have more rows
        initialCount = res.length;
    }
    int lastCount = 0;
    final long start = System.currentTimeMillis();
    int i = 0;
    while (true) {
        if (i == NB_RETRIES - 1) {
            fail("Waited too much time for queueFailover replication. " + "Waited " + (System.currentTimeMillis() - start) + "ms.");
        }
        Scan scan2 = new Scan();
        ResultScanner scanner2 = htable2.getScanner(scan2);
        Result[] res2 = scanner2.next(initialCount * 2);
        scanner2.close();
        if (res2.length < initialCount) {
            if (lastCount < res2.length) {
                // Don't increment timeout if we make progress
                i--;
            } else {
                i++;
            }
            lastCount = res2.length;
            LOG.info("Only got " + lastCount + " rows instead of " + initialCount + " current i=" + i);
            Thread.sleep(SLEEP_TIME * 2);
        } else {
            break;
        }
    }
}
Also used : ResultScanner(org.apache.hadoop.hbase.client.ResultScanner) Scan(org.apache.hadoop.hbase.client.Scan) UnknownScannerException(org.apache.hadoop.hbase.UnknownScannerException) Result(org.apache.hadoop.hbase.client.Result)

Aggregations

UnknownScannerException (org.apache.hadoop.hbase.UnknownScannerException): 7
IOException (java.io.IOException): 4
Result (org.apache.hadoop.hbase.client.Result): 4
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 3
HBaseIOException (org.apache.hadoop.hbase.HBaseIOException): 3
Scan (org.apache.hadoop.hbase.client.Scan): 3
UncheckedIOException (java.io.UncheckedIOException): 2
ArrayList (java.util.ArrayList): 2
Cell (org.apache.hadoop.hbase.Cell): 2
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException): 2
ResultScanner (org.apache.hadoop.hbase.client.ResultScanner): 2
ScannerResetException (org.apache.hadoop.hbase.exceptions.ScannerResetException): 2
ScanResponse (org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos.ScanResponse): 2
ByteString (org.apache.hbase.thirdparty.com.google.protobuf.ByteString): 2
FileNotFoundException (java.io.FileNotFoundException): 1
InterruptedIOException (java.io.InterruptedIOException): 1
UnknownHostException (java.net.UnknownHostException): 1
AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean): 1
MutableObject (org.apache.commons.lang3.mutable.MutableObject): 1
HRegionLocation (org.apache.hadoop.hbase.HRegionLocation): 1