Example 1 with HashJoinCacheNotFoundException

Use of org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException in project phoenix by apache.

From class HashJoinCacheIT, method testExpiredCache:

@Test
public void testExpiredCache() throws Exception {
    Properties props = PropertiesUtil.deepCopy(TEST_PROPERTIES);
    props.setProperty(QueryServices.MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB, "1");
    Connection conn = DriverManager.getConnection(getUrl(), props);
    String tableName1 = getTableName(conn, JOIN_SUPPLIER_TABLE_FULL_NAME);
    String tableName2 = getTableName(conn, JOIN_ITEM_TABLE_FULL_NAME);
    String query = "SELECT item.\"item_id\", item.name, supp.\"supplier_id\", supp.name FROM " + tableName1 + " supp RIGHT JOIN " + tableName2 + " item ON item.\"supplier_id\" = supp.\"supplier_id\" ORDER BY \"item_id\"";
    try {
        PreparedStatement statement = conn.prepareStatement(query);
        ResultSet rs = statement.executeQuery();
        // With a 1 ms server cache TTL, the hash join cache expires before the
        // join scan probes it, so fetching the first row must fail.
        rs.next();
        fail("HashJoinCacheNotFoundException was not thrown or incorrectly handled");
    } catch (HashJoinCacheNotFoundException e) {
        // Expected exception
    }
}
Also used : Connection(java.sql.Connection) ResultSet(java.sql.ResultSet) PreparedStatement(java.sql.PreparedStatement) Properties(java.util.Properties) HashJoinCacheNotFoundException(org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException) Test(org.junit.Test)
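
Because the test drops MAX_SERVER_CACHE_TIME_TO_LIVE_MS_ATTRIB to 1 ms, the server-side hash join cache expires before the join scan probes it, so the first rs.next() surfaces the failure. Callers that cannot catch the concrete type can recognize the condition by error code; the helper below is a minimal sketch, not part of the Phoenix source, and assumes the exception carries the HASH_JOIN_CACHE_NOT_FOUND code that Example 4 shows being used to construct it.

import java.sql.SQLException;
import org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException;
import org.apache.phoenix.exception.SQLExceptionCode;

// Hypothetical helper: detect a missing/expired server-side hash join cache
// either by concrete type or by its SQL error code.
static boolean isMissingHashJoinCache(SQLException e) {
    return e instanceof HashJoinCacheNotFoundException
            || e.getErrorCode() == SQLExceptionCode.HASH_JOIN_CACHE_NOT_FOUND.getErrorCode();
}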

Example 2 with HashJoinCacheNotFoundException

Use of org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException in project phoenix by apache.

From class TableResultIterator, method next:

@Override
public Tuple next() throws SQLException {
    try {
        renewLeaseLock.lock();
        initScanner();
        try {
            lastTuple = scanIterator.next();
            if (lastTuple != null) {
                ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                lastTuple.getKey(ptr);
            }
        } catch (SQLException e) {
            try {
                throw ServerUtil.parseServerException(e);
            } catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e1) {
                if (ScanUtil.isNonAggregateScan(scan)) {
                    // For non-aggregate queries, if we get a stale region boundary
                    // exception we can continue scanning from the next value after
                    // the last fetched result.
                    Scan newScan = ScanUtil.newScan(scan);
                    newScan.setStartRow(newScan.getAttribute(SCAN_ACTUAL_START_ROW));
                    if (lastTuple != null) {
                        // Declare ptr here: the ptr from the outer try block is out of scope.
                        ImmutableBytesWritable ptr = new ImmutableBytesWritable();
                        lastTuple.getKey(ptr);
                        byte[] startRowSuffix = ByteUtil.copyKeyBytesIfNecessary(ptr);
                        if (ScanUtil.isLocalIndex(newScan)) {
                            // If we just set scan start row suffix then server side we prepare
                            // actual scan boundaries by prefixing the region start key.
                            newScan.setAttribute(SCAN_START_ROW_SUFFIX, ByteUtil.nextKey(startRowSuffix));
                        } else {
                            newScan.setStartRow(ByteUtil.nextKey(startRowSuffix));
                        }
                    }
                    plan.getContext().getConnection().getQueryServices().clearTableRegionCache(htable.getTableName());
                    if (e1 instanceof HashJoinCacheNotFoundException) {
                        logger.debug("Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                        if (retry <= 0) {
                            throw e1;
                        }
                        retry--;
                        try {
                            Long cacheId = ((HashJoinCacheNotFoundException) e1).getCacheId();
                            ServerCache cache = caches == null ? null : caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId)));
                            if (!hashCacheClient.addHashCacheToServer(newScan.getStartRow(), cache, plan.getTableRef().getTable())) {
                                throw e1;
                            }
                            this.scanIterator = ((BaseQueryPlan) plan).iterator(caches, scanGrouper, newScan);
                        } catch (Exception e2) {
                            throw new SQLException(e2);
                        }
                    } else {
                        this.scanIterator = plan.iterator(scanGrouper, newScan);
                    }
                    lastTuple = scanIterator.next();
                } else {
                    throw e;
                }
            }
        }
        return lastTuple;
    } finally {
        renewLeaseLock.unlock();
    }
}
Also used : ServerCache(org.apache.phoenix.cache.ServerCacheClient.ServerCache) ImmutableBytesWritable(org.apache.hadoop.hbase.io.ImmutableBytesWritable) SQLException(java.sql.SQLException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) Scan(org.apache.hadoop.hbase.client.Scan) HashJoinCacheNotFoundException(org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException) StaleRegionBoundaryCacheException(org.apache.phoenix.schema.StaleRegionBoundaryCacheException) IOException(java.io.IOException)
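
Stripped of the leasing and local-index details, the recovery above is a bounded retry: re-send the hash cache identified by getCacheId(), rebuild the scanner, and fetch again. The sketch below restates just that pattern, reusing the types imported by the example; resendCache and reopenScanner are hypothetical placeholders, not Phoenix APIs.

// Condensed sketch of the retry pattern above (placeholder helpers).
private Tuple nextWithRetry() throws SQLException {
    int retriesLeft = 1; // same budget as the retry field above
    while (true) {
        try {
            return scanIterator.next();
        } catch (HashJoinCacheNotFoundException e) {
            if (retriesLeft-- <= 0) {
                throw e; // retry budget exhausted; surface the failure
            }
            resendCache(e.getCacheId());    // hypothetical: push the hash cache back to the server
            scanIterator = reopenScanner(); // hypothetical: rebuild the scanner from a fresh Scan
        }
    }
}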

Example 3 with HashJoinCacheNotFoundException

Use of org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException in project phoenix by apache.

From class BaseResultIterators, method getIterators:

private List<PeekingResultIterator> getIterators(List<List<Scan>> scan, ConnectionQueryServices services, boolean isLocalIndex, Queue<PeekingResultIterator> allIterators, List<PeekingResultIterator> iterators, boolean isReverse, long maxQueryEndTime, int splitSize, ScanWrapper previousScan, int retryCount) throws SQLException {
    boolean success = false;
    final List<List<Pair<Scan, Future<PeekingResultIterator>>>> futures = Lists.newArrayListWithExpectedSize(splitSize);
    allFutures.add(futures);
    SQLException toThrow = null;
    final HashCacheClient hashCacheClient = new HashCacheClient(context.getConnection());
    int queryTimeOut = context.getStatement().getQueryTimeoutInMillis();
    try {
        submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper);
        boolean clearedCache = false;
        for (List<Pair<Scan, Future<PeekingResultIterator>>> future : reverseIfNecessary(futures, isReverse)) {
            List<PeekingResultIterator> concatIterators = Lists.newArrayListWithExpectedSize(future.size());
            Iterator<Pair<Scan, Future<PeekingResultIterator>>> scanPairItr = reverseIfNecessary(future, isReverse).iterator();
            while (scanPairItr.hasNext()) {
                Pair<Scan, Future<PeekingResultIterator>> scanPair = scanPairItr.next();
                try {
                    long timeOutForScan = maxQueryEndTime - EnvironmentEdgeManager.currentTimeMillis();
                    if (timeOutForScan < 0) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                                .setMessage(". Query couldn't be completed in the allotted time: " + queryTimeOut + " ms")
                                .build().buildException();
                    }
                    if (isLocalIndex && previousScan != null && previousScan.getScan() != null
                            && (((!isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW),
                                    previousScan.getScan().getStopRow()) < 0)
                                || (isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW),
                                    previousScan.getScan().getStopRow()) > 0)
                                || (Bytes.compareTo(scanPair.getFirst().getStopRow(),
                                    previousScan.getScan().getStopRow()) == 0))
                                && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_START_ROW_SUFFIX),
                                    previousScan.getScan().getAttribute(SCAN_START_ROW_SUFFIX)) == 0)) {
                        continue;
                    }
                    PeekingResultIterator iterator = scanPair.getSecond().get(timeOutForScan, TimeUnit.MILLISECONDS);
                    concatIterators.add(iterator);
                    previousScan.setScan(scanPair.getFirst());
                } catch (ExecutionException e) {
                    try {
                        // Rethrow as SQLException
                        throw ServerUtil.parseServerException(e);
                    } catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e2) {
                        // Catch only to try to recover from region boundary cache being out of date
                        if (!clearedCache) {
                            // Clear cache once so that we rejigger job based on new boundaries
                            services.clearTableRegionCache(physicalTableName);
                            context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
                        }
                        // Resubmit just this portion of work again
                        Scan oldScan = scanPair.getFirst();
                        byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
                        if (e2 instanceof HashJoinCacheNotFoundException) {
                            logger.debug("Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                            if (retryCount <= 0) {
                                throw e2;
                            }
                            Long cacheId = ((HashJoinCacheNotFoundException) e2).getCacheId();
                            if (!hashCacheClient.addHashCacheToServer(startKey, caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId))), plan.getTableRef().getTable())) {
                                throw e2;
                            }
                        }
                        concatIterators = recreateIterators(services, isLocalIndex, allIterators, iterators, isReverse, maxQueryEndTime, previousScan, clearedCache, concatIterators, scanPairItr, scanPair, retryCount - 1);
                    } catch (ColumnFamilyNotFoundException cfnfe) {
                        if (scanPair.getFirst().getAttribute(LOCAL_INDEX_BUILD) != null) {
                            Thread.sleep(1000);
                            concatIterators = recreateIterators(services, isLocalIndex, allIterators, iterators, isReverse, maxQueryEndTime, previousScan, clearedCache, concatIterators, scanPairItr, scanPair, retryCount);
                        }
                    }
                }
            }
            addIterator(iterators, concatIterators);
        }
        success = true;
        return iterators;
    } catch (TimeoutException e) {
        context.getOverallQueryMetrics().queryTimedOut();
        GLOBAL_QUERY_TIMEOUT_COUNTER.increment();
        // thrown when a thread times out waiting for the future.get() call to return
        toThrow = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                .setMessage(". Query couldn't be completed in the allotted time: " + queryTimeOut + " ms")
                .setRootCause(e).build().buildException();
    } catch (SQLException e) {
        toThrow = e;
    } catch (Exception e) {
        toThrow = ServerUtil.parseServerException(e);
    } finally {
        try {
            if (!success) {
                try {
                    close();
                } catch (Exception e) {
                    if (toThrow == null) {
                        toThrow = ServerUtil.parseServerException(e);
                    } else {
                        toThrow.setNextException(ServerUtil.parseServerException(e));
                    }
                } finally {
                    try {
                        SQLCloseables.closeAll(allIterators);
                    } catch (Exception e) {
                        if (toThrow == null) {
                            toThrow = ServerUtil.parseServerException(e);
                        } else {
                            toThrow.setNextException(ServerUtil.parseServerException(e));
                        }
                    }
                }
            }
        } finally {
            if (toThrow != null) {
                GLOBAL_FAILED_QUERY_COUNTER.increment();
                context.getOverallQueryMetrics().queryFailed();
                throw toThrow;
            }
        }
    }
    // Not reachable
    return null;
}
Also used : SQLException(java.sql.SQLException) ArrayList(java.util.ArrayList) List(java.util.List) ImmutableList(com.google.common.collect.ImmutableList) ExecutionException(java.util.concurrent.ExecutionException) SQLExceptionInfo(org.apache.phoenix.exception.SQLExceptionInfo) Pair(org.apache.hadoop.hbase.util.Pair) TimeoutException(java.util.concurrent.TimeoutException) ImmutableBytesPtr(org.apache.phoenix.hbase.index.util.ImmutableBytesPtr) HashCacheClient(org.apache.phoenix.join.HashCacheClient) HashJoinCacheNotFoundException(org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException) Hint(org.apache.phoenix.parse.HintNode.Hint) ColumnFamilyNotFoundException(org.apache.phoenix.schema.ColumnFamilyNotFoundException) StaleRegionBoundaryCacheException(org.apache.phoenix.schema.StaleRegionBoundaryCacheException) EOFException(java.io.EOFException) Future(java.util.concurrent.Future) Scan(org.apache.hadoop.hbase.client.Scan)
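
Both this example and Example 2 look the cache up the same way: the Long cacheId from the exception is serialized with HBase's Bytes utility and wrapped in an ImmutableBytesPtr, the key type of the caches map. Isolated for clarity, assuming caches is a Map<ImmutableBytesPtr, ServerCache> as the lookups above imply:

// Key derivation behind caches.get(...) in Examples 2 and 3.
Long cacheId = e2.getCacheId();
ImmutableBytesPtr cacheKey = new ImmutableBytesPtr(Bytes.toBytes(cacheId));
ServerCache cache = caches.get(cacheKey);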

Example 4 with HashJoinCacheNotFoundException

Use of org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException in project phoenix by apache.

From class ServerUtil, method parseRemoteException:

private static SQLException parseRemoteException(Throwable t) {
    String message = t.getLocalizedMessage();
    if (message != null) {
        // If the message matches the standard pattern, recover the SQLException and throw it.
        Matcher matcher = PATTERN.matcher(message);
        if (matcher.find()) {
            int statusCode = Integer.parseInt(matcher.group(1));
            SQLExceptionCode code = SQLExceptionCode.fromErrorCode(statusCode);
            if (code.equals(SQLExceptionCode.HASH_JOIN_CACHE_NOT_FOUND)) {
                Matcher m = HASH_JOIN_EXCEPTION_PATTERN.matcher(message);
                if (m.find()) {
                    return new HashJoinCacheNotFoundException(Long.parseLong(m.group(1)));
                }
            }
            return new SQLExceptionInfo.Builder(code).setMessage(matcher.group()).setRootCause(t).build().buildException();
        }
    }
    // Message didn't match the standard pattern; caller falls back to generic handling.
    return null;
}
Also used : SQLExceptionCode(org.apache.phoenix.exception.SQLExceptionCode) Matcher(java.util.regex.Matcher) HashJoinCacheNotFoundException(org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException)
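
parseRemoteException works backwards from the message text that crossed the wire: PATTERN recovers the SQL error code, and for HASH_JOIN_CACHE_NOT_FOUND a second regex, HASH_JOIN_EXCEPTION_PATTERN, recovers the numeric cache id. Neither regex definition is shown above, so the sketch below assumes a "joinId: <number>" message shape purely for illustration.

import java.sql.SQLException;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical demonstration of the cache-id recovery step; the assumed
// "joinId: <number>" message shape may differ from the real pattern.
static SQLException recoverCacheId(String remoteMessage) {
    Pattern hashJoinPattern = Pattern.compile("joinId: (-?\\d+)");
    Matcher m = hashJoinPattern.matcher(remoteMessage);
    if (m.find()) {
        // Same constructor parseRemoteException uses above.
        return new HashJoinCacheNotFoundException(Long.parseLong(m.group(1)));
    }
    return null; // not a hash-join cache message
}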

Aggregations

HashJoinCacheNotFoundException (org.apache.phoenix.coprocessor.HashJoinCacheNotFoundException): 4 usages
SQLException (java.sql.SQLException): 2 usages
Scan (org.apache.hadoop.hbase.client.Scan): 2 usages
ImmutableBytesPtr (org.apache.phoenix.hbase.index.util.ImmutableBytesPtr): 2 usages
StaleRegionBoundaryCacheException (org.apache.phoenix.schema.StaleRegionBoundaryCacheException): 2 usages
ImmutableList (com.google.common.collect.ImmutableList): 1 usage
EOFException (java.io.EOFException): 1 usage
IOException (java.io.IOException): 1 usage
Connection (java.sql.Connection): 1 usage
PreparedStatement (java.sql.PreparedStatement): 1 usage
ResultSet (java.sql.ResultSet): 1 usage
ArrayList (java.util.ArrayList): 1 usage
List (java.util.List): 1 usage
Properties (java.util.Properties): 1 usage
ExecutionException (java.util.concurrent.ExecutionException): 1 usage
Future (java.util.concurrent.Future): 1 usage
TimeoutException (java.util.concurrent.TimeoutException): 1 usage
Matcher (java.util.regex.Matcher): 1 usage
ImmutableBytesWritable (org.apache.hadoop.hbase.io.ImmutableBytesWritable): 1 usage
Pair (org.apache.hadoop.hbase.util.Pair): 1 usage