Use of org.apache.phoenix.schema.StaleRegionBoundaryCacheException in project phoenix by apache.
The class BaseScannerRegionObserver, method throwIfScanOutOfRegion.
private static void throwIfScanOutOfRegion(Scan scan, Region region) throws DoNotRetryIOException {
    boolean isLocalIndex = ScanUtil.isLocalIndex(scan);
    byte[] lowerInclusiveScanKey = scan.getStartRow();
    byte[] upperExclusiveScanKey = scan.getStopRow();
    byte[] lowerInclusiveRegionKey = region.getRegionInfo().getStartKey();
    byte[] upperExclusiveRegionKey = region.getRegionInfo().getEndKey();
    boolean isStaleRegionBoundaries;
    if (isLocalIndex) {
        byte[] expectedUpperRegionKey = scan.getAttribute(EXPECTED_UPPER_REGION_KEY) == null
                ? scan.getStopRow()
                : scan.getAttribute(EXPECTED_UPPER_REGION_KEY);
        isStaleRegionBoundaries = expectedUpperRegionKey != null
                && Bytes.compareTo(upperExclusiveRegionKey, expectedUpperRegionKey) != 0;
    } else {
        isStaleRegionBoundaries =
                Bytes.compareTo(lowerInclusiveScanKey, lowerInclusiveRegionKey) < 0
                || (Bytes.compareTo(upperExclusiveScanKey, upperExclusiveRegionKey) > 0
                        && upperExclusiveRegionKey.length != 0)
                || (upperExclusiveRegionKey.length != 0 && upperExclusiveScanKey.length == 0);
    }
    if (isStaleRegionBoundaries) {
        Exception cause = new StaleRegionBoundaryCacheException(
                region.getRegionInfo().getTable().getNameAsString());
        throw new DoNotRetryIOException(cause.getMessage(), cause);
    }
    if (isLocalIndex) {
        ScanUtil.setupLocalIndexScan(scan, lowerInclusiveRegionKey, upperExclusiveRegionKey);
    }
}
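To make the three non-local-index staleness conditions above concrete, here is a minimal, self-contained sketch that reproduces them on raw byte arrays. It uses java.util.Arrays.compareUnsigned as a stand-in for HBase's Bytes.compareTo (both order byte arrays lexicographically as unsigned bytes) and follows the HBase convention that an empty key marks the open boundary of the first or last region; isStale and the sample keys are hypothetical names for illustration.

import java.util.Arrays;

public class StaleBoundaryCheckSketch {
    static boolean isStale(byte[] scanStart, byte[] scanStop, byte[] regionStart, byte[] regionStop) {
        // Scan begins before the region's first row: the region must have split or moved.
        if (Arrays.compareUnsigned(scanStart, regionStart) < 0) return true;
        // Scan ends past a bounded region's end key.
        if (regionStop.length != 0 && Arrays.compareUnsigned(scanStop, regionStop) > 0) return true;
        // Unbounded scan (empty stop key) against a bounded region: the cached
        // "last region" is stale. This needs its own check because an empty stop
        // key compares as *smaller* than any non-empty key, so the previous
        // comparison cannot catch it.
        return regionStop.length != 0 && scanStop.length == 0;
    }

    public static void main(String[] args) {
        byte[] a = {1}, c = {3};
        // Region [a, c) can serve a scan [a, c) ...
        System.out.println(isStale(a, c, a, c)); // false
        // ... but not an unbounded scan, which expects the last region (empty end key).
        System.out.println(isStale(a, new byte[0], a, c)); // true
    }
}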
Use of org.apache.phoenix.schema.StaleRegionBoundaryCacheException in project phoenix by apache.
The class TableResultIterator, method next.
@Override
public Tuple next() throws SQLException {
    try {
        renewLeaseLock.lock();
        initScanner();
        try {
            lastTuple = scanIterator.next();
            if (lastTuple != null) {
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                lastTuple.getKey(ptr);
            }
        } catch (SQLException e) {
            try {
                throw ServerUtil.parseServerException(e);
            } catch (StaleRegionBoundaryCacheException e1) {
                if (ScanUtil.isNonAggregateScan(scan)) {
                    // For non-aggregate queries, if we get a stale region boundary cache
                    // exception we can continue scanning from the key immediately after
                    // the last fetched result.
                    Scan newScan = ScanUtil.newScan(scan);
                    newScan.setStartRow(newScan.getAttribute(SCAN_ACTUAL_START_ROW));
                    if (lastTuple != null) {
                        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                        lastTuple.getKey(ptr);
                        byte[] startRowSuffix = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (ScanUtil.isLocalIndex(newScan)) {
                            // If we just set the scan start row suffix, the server side
                            // prepares the actual scan boundaries by prefixing the region
                            // start key.
                            newScan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.nextKey(startRowSuffix));
                        } else {
                            newScan.setStartRow(ByteUtil.nextKey(startRowSuffix));
                        }
                    }
                    plan.getContext().getConnection().getQueryServices()
                            .clearTableRegionCache(htable.getTableName());
                    this.scanIterator = plan.iterator(scanGrouper, newScan);
                    lastTuple = scanIterator.next();
                } else {
                    throw e;
                }
            }
        }
        return lastTuple;
    } finally {
        renewLeaseLock.unlock();
    }
}
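The recovery above hinges on computing a start row that sorts immediately after the last fetched row. A minimal sketch of that idea, outside Phoenix: appending a 0x00 byte yields the smallest byte string strictly greater than the input in unsigned lexicographic order, so the re-issued scan neither re-reads nor skips a row. Phoenix's ByteUtil.nextKey has its own implementation, so treat nextKey below as an illustrative stand-in, not a reproduction of it.

import java.util.Arrays;

public class ResumeScanSketch {
    static byte[] nextKey(byte[] lastRow) {
        // copyOf pads the extra slot with 0x00: the immediate lexicographic successor,
        // since no byte string sorts strictly between a key and that key plus 0x00.
        return Arrays.copyOf(lastRow, lastRow.length + 1);
    }

    public static void main(String[] args) {
        byte[] lastFetched = {10, 20};
        byte[] resumeFrom = nextKey(lastFetched);
        // The new inclusive start row sorts immediately after the last fetched row.
        System.out.println(Arrays.compareUnsigned(resumeFrom, lastFetched) > 0); // true
    }
}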
Use of org.apache.phoenix.schema.StaleRegionBoundaryCacheException in project phoenix by apache.
The class BaseResultIterators, method getIterators.
private List<PeekingResultIterator> getIterators(List<List<Scan>> scan, ConnectionQueryServices services,
        boolean isLocalIndex, Queue<PeekingResultIterator> allIterators, List<PeekingResultIterator> iterators,
        boolean isReverse, long maxQueryEndTime, int splitSize, ScanWrapper previousScan) throws SQLException {
    boolean success = false;
    final List<List<Pair<Scan, Future<PeekingResultIterator>>>> futures =
            Lists.newArrayListWithExpectedSize(splitSize);
    allFutures.add(futures);
    SQLException toThrow = null;
    int queryTimeOut = context.getStatement().getQueryTimeoutInMillis();
    try {
        submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper);
        boolean clearedCache = false;
        for (List<Pair<Scan, Future<PeekingResultIterator>>> future : reverseIfNecessary(futures, isReverse)) {
            List<PeekingResultIterator> concatIterators = Lists.newArrayListWithExpectedSize(future.size());
            Iterator<Pair<Scan, Future<PeekingResultIterator>>> scanPairItr =
                    reverseIfNecessary(future, isReverse).iterator();
            while (scanPairItr.hasNext()) {
                Pair<Scan, Future<PeekingResultIterator>> scanPair = scanPairItr.next();
                try {
                    long timeOutForScan = maxQueryEndTime - System.currentTimeMillis();
                    if (timeOutForScan < 0) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                                .setMessage(". Query couldn't be completed in the allotted time: "
                                        + queryTimeOut + " ms")
                                .build().buildException();
                    }
                    if (isLocalIndex && previousScan != null && previousScan.getScan() != null
                            && (((!isReverse && Bytes.compareTo(
                                        scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW),
                                        previousScan.getScan().getStopRow()) < 0)
                                    || (isReverse && Bytes.compareTo(
                                        scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW),
                                        previousScan.getScan().getStopRow()) > 0)
                                    || (Bytes.compareTo(scanPair.getFirst().getStopRow(),
                                        previousScan.getScan().getStopRow()) == 0))
                                && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_START_ROW_SUFFIX),
                                        previousScan.getScan().getAttribute(SCAN_START_ROW_SUFFIX)) == 0)) {
                        continue;
                    }
                    PeekingResultIterator iterator =
                            scanPair.getSecond().get(timeOutForScan, TimeUnit.MILLISECONDS);
                    concatIterators.add(iterator);
                    previousScan.setScan(scanPair.getFirst());
                } catch (ExecutionException e) {
                    try {
                        // Rethrow as SQLException
                        throw ServerUtil.parseServerException(e);
                    } catch (StaleRegionBoundaryCacheException e2) {
                        scanPairItr.remove();
                        // Catch only to try to recover from the region boundary cache being out of date
                        if (!clearedCache) {
                            // Clear the cache once so that we rejigger the job based on the new boundaries
                            services.clearTableRegionCache(physicalTableName);
                            context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
                            clearedCache = true;
                        }
                        // Resubmit just this portion of work again
                        Scan oldScan = scanPair.getFirst();
                        byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
                        byte[] endKey = oldScan.getStopRow();
                        List<List<Scan>> newNestedScans = this.getParallelScans(startKey, endKey);
                        // Add any concatIterators that were successful so far,
                        // as we need these to be in order
                        addIterator(iterators, concatIterators);
                        concatIterators = Lists.newArrayList();
                        getIterators(newNestedScans, services, isLocalIndex, allIterators, iterators,
                                isReverse, maxQueryEndTime, newNestedScans.size(), previousScan);
                    }
                }
            }
            addIterator(iterators, concatIterators);
        }
        success = true;
        return iterators;
    } catch (TimeoutException e) {
        context.getOverallQueryMetrics().queryTimedOut();
        GLOBAL_QUERY_TIMEOUT_COUNTER.increment();
        // Thrown when a thread times out waiting for the future.get() call to return
        toThrow = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                .setMessage(". Query couldn't be completed in the allotted time: " + queryTimeOut + " ms")
                .setRootCause(e).build().buildException();
    } catch (SQLException e) {
        toThrow = e;
    } catch (Exception e) {
        toThrow = ServerUtil.parseServerException(e);
    } finally {
        try {
            if (!success) {
                try {
                    close();
                } catch (Exception e) {
                    if (toThrow == null) {
                        toThrow = ServerUtil.parseServerException(e);
                    } else {
                        toThrow.setNextException(ServerUtil.parseServerException(e));
                    }
                } finally {
                    try {
                        SQLCloseables.closeAll(allIterators);
                    } catch (Exception e) {
                        if (toThrow == null) {
                            toThrow = ServerUtil.parseServerException(e);
                        } else {
                            toThrow.setNextException(ServerUtil.parseServerException(e));
                        }
                    }
                }
            }
        } finally {
            if (toThrow != null) {
                GLOBAL_FAILED_QUERY_COUNTER.increment();
                context.getOverallQueryMetrics().queryFailed();
                throw toThrow;
            }
        }
    }
    // Not reachable
    return null;
}
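Stripped of the Phoenix and HBase types, the shape of this recovery is: await each parallel chunk, and when one fails with a stale-metadata error, refresh the cached metadata at most once, re-partition just the failed range, and recurse so that already-completed results stay in order. The sketch below is schematic; Chunk, StaleMetadataException, and repartition are hypothetical names, and a production version would split the failed range along fresh boundaries and bound the number of retries.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;

public class ResubmitSketch {
    static class StaleMetadataException extends RuntimeException {}

    interface Chunk { List<String> run() throws StaleMetadataException; }

    static List<String> runAll(List<Chunk> chunks, ExecutorService pool, Runnable refreshCache)
            throws Exception {
        List<String> results = new ArrayList<>();
        boolean clearedCache = false;
        for (Chunk chunk : chunks) {
            Future<List<String>> f = pool.submit(chunk::run);
            try {
                results.addAll(f.get());
            } catch (ExecutionException e) {
                if (!(e.getCause() instanceof StaleMetadataException)) throw e;
                // Refresh cached metadata once so re-partitioning sees fresh boundaries.
                if (!clearedCache) { refreshCache.run(); clearedCache = true; }
                // Retry only the failed chunk; earlier results stay in order.
                results.addAll(runAll(repartition(chunk), pool, refreshCache));
            }
        }
        return results;
    }

    static List<Chunk> repartition(Chunk failed) {
        // Placeholder: a real version splits the failed range along the
        // refreshed boundaries, so the retried chunks are expected to succeed.
        return List.of(failed);
    }
}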