Use of org.apache.phoenix.join.HashCacheClient in project phoenix by apache.
From the class HashJoinPlan, method iterator:
@Override
public ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
    if (scan == null) {
        scan = delegate.getContext().getScan();
    }
    int count = subPlans.length;
    PhoenixConnection connection = getContext().getConnection();
    ConnectionQueryServices services = connection.getQueryServices();
    ExecutorService executor = services.getExecutor();
    List<Future<ServerCache>> futures = Lists.newArrayListWithExpectedSize(count);
    if (joinInfo != null) {
        // Lazily create the client the hash sub-plans use to build and ship their server-side caches
        hashClient = hashClient != null ? hashClient : new HashCacheClient(delegate.getContext().getConnection());
        firstJobEndTime = new AtomicLong(0);
        keyRangeExpressions = new CopyOnWriteArrayList<Expression>();
    }
    // Execute every sub-plan asynchronously on the connection's executor
    for (int i = 0; i < count; i++) {
        final int index = i;
        futures.add(executor.submit(new JobCallable<ServerCache>() {

            @Override
            public ServerCache call() throws Exception {
                ServerCache cache = subPlans[index].execute(HashJoinPlan.this);
                return cache;
            }

            @Override
            public Object getJobId() {
                return HashJoinPlan.this;
            }

            @Override
            public TaskExecutionMetricsHolder getTaskExecutionMetric() {
                return NO_OP_INSTANCE;
            }
        }));
    }
    // Wait for all sub-plans, keeping only the first failure
    SQLException firstException = null;
    for (int i = 0; i < count; i++) {
        try {
            ServerCache result = futures.get(i).get();
            if (result != null) {
                dependencies.put(new ImmutableBytesPtr(result.getId()), result);
            }
            subPlans[i].postProcess(result, this);
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            if (firstException == null) {
                firstException = new SQLExceptionInfo.Builder(SQLExceptionCode.INTERRUPTED_EXCEPTION).setRootCause(e)
                        .setMessage("Sub plan [" + i + "] execution interrupted.").build().buildException();
            }
        } catch (ExecutionException e) {
            if (firstException == null) {
                firstException = new SQLException("Encountered exception in sub plan [" + i + "] execution.", e.getCause());
            }
        }
    }
    if (firstException != null) {
        // Release any server caches that were already created before rethrowing
        SQLCloseables.closeAllQuietly(dependencies.values());
        throw firstException;
    }
    Expression postFilter = null;
    boolean hasKeyRangeExpressions = keyRangeExpressions != null && !keyRangeExpressions.isEmpty();
    // Recompile the WHERE clause if required, folding in any key-range expressions produced by the sub-plans
    if (recompileWhereClause || hasKeyRangeExpressions) {
        StatementContext context = delegate.getContext();
        PTable table = context.getCurrentTable().getTable();
        ParseNode viewWhere = table.getViewStatement() == null ? null : new SQLParser(table.getViewStatement()).parseQuery().getWhere();
        context.setResolver(FromCompiler.getResolverForQuery((SelectStatement) (delegate.getStatement()), delegate.getContext().getConnection()));
        if (recompileWhereClause) {
            postFilter = WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, null);
        }
        if (hasKeyRangeExpressions) {
            WhereCompiler.compile(delegate.getContext(), delegate.getStatement(), viewWhere, keyRangeExpressions, true, null);
        }
    }
    if (joinInfo != null) {
        // Attach the hash-join metadata to the scan so the join can be evaluated server-side
        HashJoinInfo.serializeHashJoinIntoScan(scan, joinInfo);
    }
    ResultIterator iterator = joinInfo == null ? delegate.iterator(scanGrouper, scan)
            : ((BaseQueryPlan) delegate).iterator(dependencies, scanGrouper, scan);
    if (statement.getInnerSelectStatement() != null && postFilter != null) {
        iterator = new FilterResultIterator(iterator, postFilter);
    }
    return iterator;
}
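
Stripped of the Phoenix-specific types, the sub-plan handling above is a plain fan-out/fan-in pattern: submit every task to an executor, wait for each future, remember only the first failure, and release whatever was already built before rethrowing. The following sketch restates that pattern in isolation; the class name, the fixed-size thread pool, and the use of AutoCloseable as the result type are assumptions made for this example and are not Phoenix APIs.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class FirstFailureCollector {

    /** Runs all tasks, closes every produced resource if any task failed, and rethrows the first failure. */
    static List<AutoCloseable> runAll(List<Callable<AutoCloseable>> tasks) throws Exception {
        ExecutorService executor = Executors.newFixedThreadPool(4);
        List<Future<AutoCloseable>> futures = new ArrayList<>(tasks.size());
        for (Callable<AutoCloseable> task : tasks) {
            futures.add(executor.submit(task));          // fan out, like the sub-plan submission above
        }
        List<AutoCloseable> results = new ArrayList<>(tasks.size());
        Exception firstException = null;
        for (int i = 0; i < futures.size(); i++) {
            try {
                results.add(futures.get(i).get());       // wait for each sub-task in order
            } catch (InterruptedException e) {
                Thread.currentThread().interrupt();      // restore the interrupt flag, as the Phoenix code does
                if (firstException == null) {
                    firstException = new Exception("Sub task [" + i + "] interrupted.", e);
                }
            } catch (ExecutionException e) {
                if (firstException == null) {            // keep only the first failure, drain the rest
                    firstException = new Exception("Sub task [" + i + "] failed.", e.getCause());
                }
            }
        }
        executor.shutdown();
        if (firstException != null) {
            for (AutoCloseable r : results) {
                try { r.close(); } catch (Exception ignored) { }   // release partial results before rethrowing
            }
            throw firstException;
        }
        return results;
    }
}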
Use of org.apache.phoenix.join.HashCacheClient in project phoenix by apache.
From the class BaseResultIterators, method getIterators:
private List<PeekingResultIterator> getIterators(List<List<Scan>> scan, ConnectionQueryServices services, boolean isLocalIndex,
        Queue<PeekingResultIterator> allIterators, List<PeekingResultIterator> iterators, boolean isReverse, long maxQueryEndTime,
        int splitSize, ScanWrapper previousScan, int retryCount) throws SQLException {
    boolean success = false;
    final List<List<Pair<Scan, Future<PeekingResultIterator>>>> futures = Lists.newArrayListWithExpectedSize(splitSize);
    allFutures.add(futures);
    SQLException toThrow = null;
    // Used to re-send a hash cache to a region server that reports it missing
    final HashCacheClient hashCacheClient = new HashCacheClient(context.getConnection());
    int queryTimeOut = context.getStatement().getQueryTimeoutInMillis();
    try {
        submitWork(scan, futures, allIterators, splitSize, isReverse, scanGrouper);
        boolean clearedCache = false;
        for (List<Pair<Scan, Future<PeekingResultIterator>>> future : reverseIfNecessary(futures, isReverse)) {
            List<PeekingResultIterator> concatIterators = Lists.newArrayListWithExpectedSize(future.size());
            Iterator<Pair<Scan, Future<PeekingResultIterator>>> scanPairItr = reverseIfNecessary(future, isReverse).iterator();
            while (scanPairItr.hasNext()) {
                Pair<Scan, Future<PeekingResultIterator>> scanPair = scanPairItr.next();
                try {
                    // Time remaining before the overall query deadline bounds each future.get() below
                    long timeOutForScan = maxQueryEndTime - EnvironmentEdgeManager.currentTimeMillis();
                    if (timeOutForScan < 0) {
                        throw new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                                .setMessage(". Query couldn't be completed in the alloted time: " + queryTimeOut + " ms").build().buildException();
                    }
                    if (isLocalIndex && previousScan != null && previousScan.getScan() != null
                            && (((!isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW), previousScan.getScan().getStopRow()) < 0)
                                    || (isReverse && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_ACTUAL_START_ROW), previousScan.getScan().getStopRow()) > 0)
                                    || (Bytes.compareTo(scanPair.getFirst().getStopRow(), previousScan.getScan().getStopRow()) == 0))
                                && Bytes.compareTo(scanPair.getFirst().getAttribute(SCAN_START_ROW_SUFFIX), previousScan.getScan().getAttribute(SCAN_START_ROW_SUFFIX)) == 0)) {
                        continue;
                    }
                    PeekingResultIterator iterator = scanPair.getSecond().get(timeOutForScan, TimeUnit.MILLISECONDS);
                    concatIterators.add(iterator);
                    previousScan.setScan(scanPair.getFirst());
                } catch (ExecutionException e) {
                    try {
                        // Rethrow as SQLException
                        throw ServerUtil.parseServerException(e);
                    } catch (StaleRegionBoundaryCacheException | HashJoinCacheNotFoundException e2) {
                        // Catch only to try to recover from region boundary cache being out of date
                        if (!clearedCache) {
                            // Clear cache once so that we rejigger job based on new boundaries
                            services.clearTableRegionCache(physicalTableName);
                            context.getOverallQueryMetrics().cacheRefreshedDueToSplits();
                        }
                        // Resubmit just this portion of work again
                        Scan oldScan = scanPair.getFirst();
                        byte[] startKey = oldScan.getAttribute(SCAN_ACTUAL_START_ROW);
                        if (e2 instanceof HashJoinCacheNotFoundException) {
                            logger.debug("Retrying when Hash Join cache is not found on the server ,by sending the cache again");
                            if (retryCount <= 0) {
                                throw e2;
                            }
                            // Re-send the missing hash cache to the region server before retrying
                            Long cacheId = ((HashJoinCacheNotFoundException) e2).getCacheId();
                            if (!hashCacheClient.addHashCacheToServer(startKey,
                                    caches.get(new ImmutableBytesPtr(Bytes.toBytes(cacheId))), plan.getTableRef().getTable())) {
                                throw e2;
                            }
                        }
                        concatIterators = recreateIterators(services, isLocalIndex, allIterators, iterators, isReverse,
                                maxQueryEndTime, previousScan, clearedCache, concatIterators, scanPairItr, scanPair, retryCount - 1);
                    } catch (ColumnFamilyNotFoundException cfnfe) {
                        if (scanPair.getFirst().getAttribute(LOCAL_INDEX_BUILD) != null) {
                            Thread.sleep(1000);
                            concatIterators = recreateIterators(services, isLocalIndex, allIterators, iterators, isReverse,
                                    maxQueryEndTime, previousScan, clearedCache, concatIterators, scanPairItr, scanPair, retryCount);
                        }
                    }
                }
            }
            addIterator(iterators, concatIterators);
        }
        success = true;
        return iterators;
    } catch (TimeoutException e) {
        context.getOverallQueryMetrics().queryTimedOut();
        GLOBAL_QUERY_TIMEOUT_COUNTER.increment();
        // thrown when a thread times out waiting for the future.get() call to return
        toThrow = new SQLExceptionInfo.Builder(SQLExceptionCode.OPERATION_TIMED_OUT)
                .setMessage(". Query couldn't be completed in the alloted time: " + queryTimeOut + " ms").setRootCause(e).build().buildException();
    } catch (SQLException e) {
        toThrow = e;
    } catch (Exception e) {
        toThrow = ServerUtil.parseServerException(e);
    } finally {
        try {
            if (!success) {
                try {
                    close();
                } catch (Exception e) {
                    if (toThrow == null) {
                        toThrow = ServerUtil.parseServerException(e);
                    } else {
                        toThrow.setNextException(ServerUtil.parseServerException(e));
                    }
                } finally {
                    try {
                        SQLCloseables.closeAll(allIterators);
                    } catch (Exception e) {
                        if (toThrow == null) {
                            toThrow = ServerUtil.parseServerException(e);
                        } else {
                            toThrow.setNextException(ServerUtil.parseServerException(e));
                        }
                    }
                }
            }
        } finally {
            if (toThrow != null) {
                GLOBAL_FAILED_QUERY_COUNTER.increment();
                context.getOverallQueryMetrics().queryFailed();
                throw toThrow;
            }
        }
    }
    // Not reachable
    return null;
}
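
The retry handling in getIterators combines two ideas worth isolating: every future.get() is bounded by the time remaining before the overall query deadline, and a HashJoinCacheNotFoundException triggers at most retryCount re-sends of the hash cache before the error is surfaced. The sketch below restates that control flow on its own; getWithDeadline, resubmit, resendCache, and MissingCacheException are hypothetical names invented for this example and do not exist in Phoenix.

import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class DeadlineRetryExample {

    /**
     * Waits for a result under a global query deadline, retrying a bounded number of times
     * when the server signals that a required cache is missing. resendCache stands in for
     * re-sending the hash cache (as hashCacheClient.addHashCacheToServer does above), and
     * resubmit stands in for recreating the failed portion of work.
     */
    static <T> T getWithDeadline(Future<T> future, long deadlineMillis, int retryCount,
                                 Callable<Future<T>> resubmit, Runnable resendCache) throws Exception {
        while (true) {
            long remaining = deadlineMillis - System.currentTimeMillis();   // time left before the deadline
            if (remaining < 0) {
                throw new TimeoutException("Query couldn't be completed in the allotted time");
            }
            try {
                return future.get(remaining, TimeUnit.MILLISECONDS);        // bound each wait by the remaining budget
            } catch (ExecutionException e) {
                if (!(e.getCause() instanceof MissingCacheException) || retryCount <= 0) {
                    throw e;                                                 // not recoverable, or retries exhausted
                }
                resendCache.run();                                           // ship the cache to the server again
                future = resubmit.call();                                    // resubmit just this portion of work
                retryCount--;
            }
        }
    }

    /** Hypothetical marker for "the server no longer has the cache it needs". */
    static class MissingCacheException extends Exception { }
}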