
Example 1 with StaleRegionBoundaryCacheException

Use of org.apache.phoenix.schema.StaleRegionBoundaryCacheException in project phoenix by apache.

From class BaseScannerRegionObserver, method throwIfScanOutOfRegion:

private static void throwIfScanOutOfRegion(Scan scan, Region region) throws DoNotRetryIOException {
    boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
    byte[] lowerInclusiveScanKey = scan.getStartRow();
    byte[] upperExclusiveScanKey = scan.getStopRow();
    byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey();
    byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey();
    boolean isStaleRegionBoundaries;
    if (isLocalIndex) {
        // For local index scans the client records the region end key it expected;
        // if the region's actual end key differs, the region has since split or merged.
        byte[] expectedUpperRegionKey = scan.getAttribute(EXPECTED_UPPER_REGION_KEY) == null
                ? scan.getStopRow()
                : scan.getAttribute(EXPECTED_UPPER_REGION_KEY);
        isStaleRegionBoundaries = expectedUpperRegionKey != null
                && Bytes.compareTo(upperExclusiveRegionKey, expectedUpperRegionKey) != 0;
    } else {
        // The scan range must fall entirely inside the region. An empty region end key
        // means the region extends to the end of the table, so only then may the scan's
        // stop row exceed it (or be empty itself).
        isStaleRegionBoundaries =
                Bytes.compareTo(lowerInclusiveScanKey, lowerInclusiveRegionKey) < 0
                || (Bytes.compareTo(upperExclusiveScanKey, upperExclusiveRegionKey) > 0
                        && upperExclusiveRegionKey.length != 0)
                || (upperExclusiveRegionKey.length != 0 && upperExclusiveScanKey.length == 0);
    }
    if (isStaleRegionBoundaries) {
        // Wrap the stale-boundary signal in a DoNotRetryIOException so HBase does not
        // retry blindly; the Phoenix client unwraps it and re-plans the scan instead.
        Exception cause = new StaleRegionBoundaryCacheException(region.getRegionInfo().getTable().getNameAsString());
        throw new DoNotRetryIOException(cause.getMessage(), cause);
    }
    if (isLocalIndex) {
        ScanUtil.setupLocalIndexScan(scan, lowerInclusiveRegionKey, upperExclusiveRegionKey);
    }
}
Also used : DoNotRetryIOException(org.apache.hadoop.hbase.DoNotRetryIOException) StaleRegionBoundaryCacheException(org.apache.phoenix.schema.StaleRegionBoundaryCacheException) NotServingRegionException(org.apache.hadoop.hbase.NotServingRegionException) IOException(java.io.IOException)
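
Note how the server signals staleness: the StaleRegionBoundaryCacheException travels as the cause of a DoNotRetryIOException. Below is a minimal, self-contained sketch (not from the Phoenix sources; the class and method names are hypothetical) showing how a caller can recognize that signal by walking the cause chain, using only the two constructors seen above:

import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.phoenix.schema.StaleRegionBoundaryCacheException;

public class StaleBoundaryDetector {

    // Returns true if any cause in the chain is a StaleRegionBoundaryCacheException.
    public static boolean isStaleRegionBoundary(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof StaleRegionBoundaryCacheException) {
                return true;
            }
        }
        return false;
    }

    public static void main(String[] args) {
        // Mirror the wrapping performed by throwIfScanOutOfRegion() above.
        Exception cause = new StaleRegionBoundaryCacheException("MY_TABLE");
        DoNotRetryIOException wrapped = new DoNotRetryIOException(cause.getMessage(), cause);
        System.out.println(isStaleRegionBoundary(wrapped)); // prints: true
    }
}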

Example 2 with StaleRegionBoundaryCacheException

Use of org.apache.phoenix.schema.StaleRegionBoundaryCacheException in project phoenix by apache.

From class TableResultIterator, method next:

@Override
public Tuple next() throws SQLException {
    try {
        renewLeaseLock.lock();
        initScanner();
        try {
            lastTuple = scanIterator.next();
            if (lastTuple != null) {
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                lastTuple.getKey(ptr);
            }
        } catch (SQLException e) {
            try {
                throw ServerUtil.parseServerException(e);
            } catch (StaleRegionBoundaryCacheException e1) {
                if (ScanUtil.isNonAggregateScan(scan)) {
                    // For non-aggregate queries, on a stale region boundary exception we can
                    // continue scanning from the key after the last fetched result.
                    Scan newScan = ScanUtil.newScan(scan);
                    newScan.setStartRow(newScan.getAttribute(SCAN_ACTUAL_START_ROW));
                    if (lastTuple != null) {
                        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                        lastTuple.getKey(ptr);
                        byte[] startRowSuffix = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (ScanUtil.isLocalIndex(newScan)) {
                            // If we only set the scan start row suffix, the server side prepares
                            // the actual scan boundaries by prefixing the region start key.
                            newScan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.nextKey(startRowSuffix));
                        } else {
                            newScan.setStartRow(ByteUtil.nextKey(startRowSuffix));
                        }
                    }
                    // The cached region boundaries are stale: clear them and reissue the scan.
                    plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getTableName());
                    this.scanIterator = plan.iterator(scanGrouper, newScan);
                    lastTuple = scanIterator.next();
                } else {
                    throw e;
                }
            }
        }
        return lastTuple;
    } finally {
        renewLeaseLock.unlock();
    }
}
Also used : ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) SQLException(java.sql.SQLException) StaleRegionBoundaryCacheException(org.apache.phoenix.schema.StaleRegionBoundaryCacheException) Scan(org.apache.hadoop.hbase.client.Scan)
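
The restart point above is computed with ByteUtil.nextKey, which returns the smallest key strictly greater than its argument, so the reissued scan resumes immediately after the last row already delivered and never returns a row twice. A minimal sketch of that computation in isolation (the ScanResume class and resumeAfter method are hypothetical; it assumes Phoenix's ByteUtil and the HBase client on the classpath):

import java.io.IOException;

import org.apache.hadoop.hbase.client.Scan;
import org.apache.phoenix.util.ByteUtil;

public final class ScanResume {

    // Returns a copy of 'scan' that restarts at the key immediately after
    // 'lastRowKey', so no row is returned twice after a stale-boundary retry.
    public static Scan resumeAfter(Scan scan, byte[] lastRowKey) throws IOException {
        Scan newScan = new Scan(scan); // copies ranges, filters, and attributes
        newScan.setStartRow(ByteUtil.nextKey(lastRowKey)); // first not-yet-seen key
        return newScan;
    }
}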

Example 3 with StaleRegionBoundaryCacheException

Use of org.apache.phoenix.schema.StaleRegionBoundaryCacheException in project phoenix by apache.

From class BaseResultIterators, method getIterators:

private List<PeekingResultIterator> getIterators(List<List<Scan>> scan, ConnectionQueryServices services, boolean isLocalIndex, Queue<PeekingResultIterator> allIterators, List<PeekingResultIterator> iterators, boolean isReverse, long maxQueryEndTime, int splitSize, ScanWrapper previousScan) throws SQLException {
    boolean success = false;
    final List<List<Pair<Scan, Future<PeekingResultIterator>>>> futures = Lists.newArrayListWithExpectedSize(splitSize);
    allFutures.add(futures);
    SQLException toThrow = null;
    int queryTimeOut = context.getStatement().getQueryTimeoutInMillis();
    try {
        submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper);
        boolean clearedCache = false;
        for (List<Pair<Scan, Future<PeekingResultIterator>>> future : reverseIfNecessary(futures, isReverse)) {
            List<PeekingResultIterator> concatIterators = Lists.newArrayListWithExpectedSize(future.size());
            Iterator<Pair<Scan, Future<PeekingResultIterator>>> scanPairItr = reverseIfNecessary(future, isReverse).iterator();
            while (scanPairItr.hasNext()) {
                Pair<Scan, Future<PeekingResultIterator>> scanPair = scanPairItr.next();
                try {
                    long timeOutForScan = maxQueryEndTime - System.currentTimeMillis();
                    if (timeOutForScan < 0) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                                .setMessage(". Query couldn't be completed in the alloted time: "
                                        + queryTimeOut + " ms")
                                .build().buildException();
                    }
                    // Skip a scan the previous local-index scan already covered: same
                    // SCAN_START_ROW_SUFFIX and a start row before (or stop row equal to)
                    // the previous scan's stop row.
                    if (isLocalIndex && previousScan != null && previousScan.getScan() != null
                            && (((!isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW),
                                        previousScan.getScan().getStopRow()) < 0)
                                    || (isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW),
                                        previousScan.getScan().getStopRow()) > 0)
                                    || (Bytes.compareTo(scanPair.getFirst().getStopRow(),
                                        previousScan.getScan().getStopRow()) == 0))
                                && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_START_ROW_SUFFIX),
                                        previousScan.getScan().getAttribute(SCAN_START_ROW_SUFFIX)) == 0)) {
                        continue;
                    }
                    PeekingResultIterator iterator = scanPair.getSecond().get(timeOutForScan, TimeUnit.MILLISECONDS);
                    concatIterators.add(iterator);
                    previousScan.setScan(scanPair.getFirst());
                } catch (ExecutionException e) {
                    try {
                        // Rethrow as SQLException
                        throw ServerUtil.parseServerException(e);
                    } catch (StaleRegionBoundaryCacheException e2) {
                        scanPairItr.remove();
                        // Catch only to try to recover from region boundary cache being out of date
                        if (!clearedCache) {
                            // Clear cache once so that we rejigger job based on new boundaries
                            services.clearTableRegionCache(physicalTableName);
                            context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
                        }
                        // Resubmit just this portion of work again
                        Scan oldScan = scanPair.getFirst();
                        byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
                        byte[] endKey = oldScan.getStopRow();
                        List<List<Scan>> newNestedScans = this.getParallelScans(startKey, endKey);
                        // Add any concatIterators that were successful so far
                        // as we need these to be in order
                        addIterator(iterators, concatIterators);
                        concatIterators = Lists.newArrayList();
                        getIterators(newNestedScans, services, isLocalIndex, allIterators, iterators, isReverse, maxQueryEndTime, newNestedScans.size(), previousScan);
                    }
                }
            }
            addIterator(iterators, concatIterators);
        }
        success = true;
        return iterators;
    } catch (TimeoutException e) {
        context.getOverallQueryMetrics().queryTimedOut();
        GLOBAL_QUERY_TIMEOUT_COUNTER.increment();
        // thrown when a thread times out waiting for the future.get() call to return
        toThrow = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                .setMessage(". Query couldn't be completed in the alloted time: "
                        + queryTimeOut + " ms")
                .setRootCause(e).build().buildException();
    } catch (SQLException e) {
        toThrow = e;
    } catch (Exception e) {
        toThrow = ServerUtil.parseServerException(e);
    } finally {
        try {
            if (!success) {
                try {
                    close();
                } catch (Exception e) {
                    if (toThrow == null) {
                        toThrow = ServerUtil.parseServerException(e);
                    } else {
                        toThrow.setNextException(ServerUtil.parseServerException(e));
                    }
                } finally {
                    try {
                        SQLCloseables.closeAll(allIterators);
                    } catch (Exception e) {
                        if (toThrow == null) {
                            toThrow = ServerUtil.parseServerException(e);
                        } else {
                            toThrow.setNextException(ServerUtil.parseServerException(e));
                        }
                    }
                }
            }
        } finally {
            if (toThrow != null) {
                GLOBAL_FAILED_QUERY_COUNTER.increment();
                context.getOverallQueryMetrics().queryFailed();
                throw toThrow;
            }
        }
    }
    // Not reachable
    return null;
}
Also used : SQLException(java.sql.SQLException) Hint(org.apache.phoenix.parse.HintNode.Hint) TimeoutException(java.util.concurrent.TimeoutException) StaleRegionBoundaryCacheException(org.apache.phoenix.schema.StaleRegionBoundaryCacheException) EOFException(java.io.EOFException) ExecutionException(java.util.concurrent.ExecutionException) Future(java.util.concurrent.Future) List(java.util.List) ArrayList(java.util.ArrayList) ImmutableList(com.google.common.collect.ImmutableList) Scan(org.apache.hadoop.hbase.client.Scan) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Pair(org.apache.hadoop.hbase.util.Pair)
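
The recovery branch above follows a clear-once, resubmit-narrowly pattern: the region cache is refreshed at most once per pass (the clearedCache flag) and only the failed key range is re-planned, not the whole query. A pure-JDK sketch of that control flow (every name here is hypothetical, not Phoenix API):

import java.util.concurrent.atomic.AtomicBoolean;

public final class RetryWithCacheClear {

    interface RegionCache { void clear(String tableName); }

    interface RangeTask { void run(byte[] startKey, byte[] endKey) throws Exception; }

    // Runs 'task' over [startKey, endKey); on failure, clears the region cache
    // at most once across all ranges (guarded by 'cleared'), then resubmits
    // only this range, mirroring the StaleRegionBoundaryCacheException branch above.
    static void runWithRecovery(RangeTask task, RegionCache cache, String table,
                                byte[] startKey, byte[] endKey,
                                AtomicBoolean cleared) throws Exception {
        try {
            task.run(startKey, endKey);
        } catch (Exception e) {
            if (cleared.compareAndSet(false, true)) {
                cache.clear(table); // refresh boundaries once so retries see the new regions
            }
            task.run(startKey, endKey); // resubmit just this portion of work
        }
    }
}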

Aggregations

StaleRegionBoundaryCacheException (org.apache.phoenix.schema.StaleRegionBoundaryCacheException): 3
SQLException (java.sql.SQLException): 2
Scan (org.apache.hadoop.hbase.client.Scan): 2
ImmutableList (com.google.common.collect.ImmutableList): 1
EOFException (java.io.EOFException): 1
IOException (java.io.IOException): 1
ArrayList (java.util.ArrayList): 1
List (java.util.List): 1
ExecutionException (java.util.concurrent.ExecutionException): 1
Future (java.util.concurrent.Future): 1
TimeoutException (java.util.concurrent.TimeoutException): 1
DoNotRetryIOException (org.apache.hadoop.hbase.DoNotRetryIOException): 1
NotServingRegionException (org.apache.hadoop.hbase.NotServingRegionException): 1
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 1
Pair (org.apache.hadoop.hbase.util.Pair): 1
SQLExceptionInfo (org.apache.phoenix.exception.SQLExceptionInfo): 1
Hint (org.apache.phoenix.parse.HintNode.Hint): 1