Usage example of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in the Apache Ignite project: class H2TreeIndex, method createLookupBatch.
/** {@inheritDoc} */
@Override
public IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter) {
    QueryContext qryCtx = H2Utils.context(filters[filter].getSession());

    // Distributed lookup batches are only meaningful for partitioned tables
    // queried as part of a distributed join.
    if (qryCtx == null || qryCtx.distributedJoinContext() == null || !getTable().isPartitioned())
        return null;

    IndexColumn affKeyCol = getTable().getAffinityKeyColumn();
    GridH2RowDescriptor rowDesc = getTable().rowDescriptor();

    int affKeyColId = -1;
    boolean unicast = false;

    if (affKeyCol != null) {
        affKeyColId = affKeyCol.column.getColumnId();

        int[] condMasks = filters[filter].getMasks();

        // Unicast is possible when there is an equality condition on the
        // affinity key column (or on the key itself).
        if (condMasks != null) {
            boolean affColEq = (condMasks[affKeyColId] & IndexCondition.EQUALITY) != 0;

            unicast = affColEq || rowDesc.checkKeyIndexCondition(condMasks, IndexCondition.EQUALITY);
        }
    }

    return new DistributedLookupBatch(this, cctx, unicast, affKeyColId);
}
Usage example of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in the Apache Ignite project: class H2TreeIndex, method find.
/** {@inheritDoc} */
@Override
public Cursor find(Session ses, SearchRow lower, SearchRow upper) {
    assert lower == null || lower instanceof H2Row : lower;
    assert upper == null || upper instanceof H2Row : upper;

    try {
        // Translate H2 search bounds into index-row bounds (both inclusive).
        T2<IndexRow, IndexRow> bounds = prepareIndexKeys(lower, upper);

        QueryContext qctx = null;

        if (ses != null)
            qctx = H2Utils.context(ses);

        GridCursor<IndexRow> idxCursor = queryIndex.find(
            bounds.get1(),
            bounds.get2(),
            true,
            true,
            segment(qctx),
            idxQryContext(qctx));

        // Map index rows back to H2 rows and adapt to the H2 cursor API.
        return new H2Cursor(new IndexValueCursor<>(idxCursor, this::mapIndexRow));
    }
    catch (IgniteCheckedException e) {
        throw DbException.convert(e);
    }
}
Usage example of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in the Apache Ignite project: class H2Utils, method setupConnection.
/**
 * Configures the session of the given pooled connection for query execution
 * and installs the query context both on the session and in a thread local.
 *
 * @param conn Connection to use.
 * @param qctx Query context.
 * @param distributedJoins If distributed joins are enabled.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param lazy Lazy query execution mode.
 */
public static void setupConnection(
    H2PooledConnection conn,
    QueryContext qctx,
    boolean distributedJoins,
    boolean enforceJoinOrder,
    boolean lazy
) {
    Session ses = session(conn);

    ses.setForceJoinOrder(enforceJoinOrder);
    ses.setJoinBatchEnabled(distributedJoins);
    ses.setLazyQueryExecution(lazy);

    QueryContext prevCtx = (QueryContext)ses.getVariable(QCTX_VARIABLE_NAME).getObject();

    // A session either has no context yet or already carries this very context.
    assert prevCtx == null || prevCtx == qctx : prevCtx;

    ses.setVariable(QCTX_VARIABLE_NAME, new ValueRuntimeSimpleObject<>(qctx));

    // Hack with thread local context is used only for H2 methods that is called without Session object.
    // e.g. GridH2Table.getRowCountApproximation (used only on optimization phase, after parse).
    QueryContext.threadLocal(qctx);
}
Usage example of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in the Apache Ignite project: class GridReduceQueryExecutor, method query.
/**
 * Executes a two-step (map/reduce) distributed SQL query and returns an iterator
 * over the reduced result rows. Maps the query onto the current topology, sends
 * map requests to data nodes, and either merges results through a merge table or
 * streams them directly; retries on topology changes until the retry timeout
 * expires or the query is cancelled.
 *
 * @param qryId Query ID.
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @param mvccTracker Query tracker.
 * @param dataPageScanEnabled If data page scan is enabled.
 * @param pageSize Page size.
 * @return Rows iterator.
 */
@SuppressWarnings("IfMayBeConditional")
public Iterator<List<?>> query(long qryId, String schemaName, final GridCacheTwoStepQuery qry,
    boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel,
    Object[] params, int[] parts, boolean lazy, MvccQueryTracker mvccTracker,
    Boolean dataPageScanEnabled, int pageSize) {
    // MVCC queries must always come with a tracker.
    assert !qry.mvccEnabled() || mvccTracker != null;

    if (pageSize <= 0)
        pageSize = Query.DFLT_PAGE_SIZE;

    // If explicit partitions are set, but there are no real tables, ignore.
    if (!qry.hasCacheIds() && parts != null)
        parts = null;

    // Partitions are not supported for queries over all replicated caches.
    if (parts != null && qry.isReplicatedOnly())
        throw new CacheException("Partitions are not supported for replicated caches");

    try {
        if (qry.mvccEnabled())
            checkActive(tx(ctx));
    }
    catch (IgniteTxAlreadyCompletedCheckedException e) {
        throw new TransactionAlreadyCompletedException(e.getMessage(), e);
    }

    final boolean singlePartMode = parts != null && parts.length == 1;

    if (F.isEmpty(params))
        params = EMPTY_PARAMS;

    List<Integer> cacheIds = qry.cacheIds();

    List<GridCacheSqlQuery> mapQueries = prepareMapQueries(qry, params, singlePartMode);

    // The merge table can be skipped when the query allows it or a single partition is queried.
    final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable() || singlePartMode;

    final long retryTimeout = retryTimeout(timeoutMillis);
    final long qryStartTime = U.currentTimeMillis();

    ReduceQueryRun lastRun = null;

    // Retry loop: each iteration maps the query onto the current topology,
    // sends map requests and awaits replies; mapping failures and remote
    // retry requests start another attempt.
    for (int attempt = 0; ; attempt++) {
        ensureQueryNotCancelled(cancel);

        if (attempt > 0) {
            // Back off before retrying (bounded by retryTimeout).
            throttleOnRetry(lastRun, qryStartTime, retryTimeout, attempt);

            ensureQueryNotCancelled(cancel);
        }

        AffinityTopologyVersion topVer = h2.readyTopologyVersion();

        // Check if topology has changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " +
                "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }

        ReducePartitionMapResult mapping = createMapping(qry, parts, cacheIds, topVer);

        if (mapping == null)
            // Can't map query onto this topology — retry.
            continue;

        final Collection<ClusterNode> nodes = mapping.nodes();

        final Map<ClusterNode, Integer> nodeToSegmentsCnt = createNodeToSegmentsCountMapping(qry, mapping);

        assert !F.isEmpty(nodes);

        H2PooledConnection conn = h2.connections().connection(schemaName);

        final long qryReqId = qryReqIdGen.incrementAndGet();

        h2.runningQueryManager().trackRequestId(qryReqId);

        // Set to false only when ownership of remote/local resources is handed
        // off to the returned iterator; otherwise the finally blocks release them.
        boolean release = true;

        try {
            final ReduceQueryRun r = createReduceQueryRun(conn, mapQueries, nodes, pageSize,
                nodeToSegmentsCnt, skipMergeTbl, qry.explain(), dataPageScanEnabled);

            runs.put(qryReqId, r);

            try {
                // Propagate cancellation to all map nodes.
                cancel.add(() -> send(nodes, new GridQueryCancelRequest(qryReqId), null, true));

                GridH2QueryRequest req = new GridH2QueryRequest()
                    .queryId(qryId)
                    .requestId(qryReqId)
                    .topologyVersion(topVer)
                    .pageSize(pageSize)
                    .caches(qry.cacheIds())
                    .tables(qry.distributedJoins() ? qry.tables() : null)
                    .partitions(convert(mapping.partitionsMap()))
                    .queries(mapQueries)
                    .parameters(params)
                    .flags(queryFlags(qry, enforceJoinOrder, lazy, dataPageScanEnabled))
                    .timeout(timeoutMillis)
                    .explicitTimeout(true)
                    .schemaName(schemaName);

                if (mvccTracker != null)
                    req.mvccSnapshot(mvccTracker.snapshot());

                // Per-node request specializer narrows partitions when explicit parts are given.
                final C2<ClusterNode, Message, Message> spec = parts == null ? null :
                    new ReducePartitionsSpecializer(mapping.queryPartitionsMap());

                boolean retry = false;

                if (send(nodes, req, spec, false)) {
                    awaitAllReplies(r, nodes, cancel);

                    if (r.hasErrorOrRetry()) {
                        CacheException err = r.exception();

                        if (err != null) {
                            if (err.getCause() instanceof IgniteClientDisconnectedException)
                                throw err;
                            else if (QueryUtils.wasCancelled(err))
                                // Throw correct exception.
                                throw new QueryCancelledException();

                            throw err;
                        }

                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion(r.retryTopologyVersion());

                        retry = true;
                    }
                }
                else
                    // Send failed — retry on next topology.
                    retry = true;

                if (retry) {
                    lastRun = runs.get(qryReqId);

                    assert lastRun != null;

                    // Retry.
                    continue;
                }

                Iterator<List<?>> resIter;

                if (skipMergeTbl) {
                    // Stream map results directly; the iterator now owns the remote resources.
                    resIter = new ReduceIndexIterator(this, nodes, r, qryReqId, qry.distributedJoins(),
                        mvccTracker, ctx.tracing());

                    release = false;

                    U.close(conn, log);
                }
                else {
                    ensureQueryNotCancelled(cancel);

                    QueryContext qctx = new QueryContext(0, null, null, null, null, true);

                    // NOTE(review): 4-arg overload here vs. the 5-parameter setupConnection
                    // (with lazy flag) seen elsewhere — presumably lazy is defaulted; confirm.
                    H2Utils.setupConnection(conn, qctx, false, enforceJoinOrder);

                    if (qry.explain())
                        return explainPlan(conn, qry, params);

                    GridCacheSqlQuery rdc = qry.reduceQuery();

                    final PreparedStatement stmt = conn.prepareStatementNoCache(rdc.query());

                    H2Utils.bindParameters(stmt, F.asList(rdc.parameters(params)));

                    ReduceH2QueryInfo qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(),
                        ctx.localNodeId(), qryId, qryReqId);

                    // Run the reduce query locally over the merge table(s).
                    ResultSet res = h2.executeSqlQueryWithTimer(stmt, conn, rdc.query(), timeoutMillis,
                        cancel, dataPageScanEnabled, qryInfo);

                    resIter = new H2FieldsIterator(res, mvccTracker, conn, r.pageSize(), log, h2,
                        qryInfo, ctx.tracing());

                    // The iterator now owns the connection; prevent close in the outer finally.
                    conn = null;

                    // To prevent callback inside finally block;
                    mvccTracker = null;
                }

                return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
            }
            catch (IgniteCheckedException | RuntimeException e) {
                release = true;

                if (e instanceof CacheException) {
                    if (QueryUtils.wasCancelled(e))
                        throw new CacheException("Failed to run reduce query locally.",
                            new QueryCancelledException());

                    throw (CacheException)e;
                }

                Throwable cause = e;

                // Unwrap client-disconnect errors so callers see the real reason.
                if (e instanceof IgniteCheckedException) {
                    Throwable disconnectedErr =
                        ((IgniteCheckedException)e).getCause(IgniteClientDisconnectedException.class);

                    if (disconnectedErr != null)
                        cause = disconnectedErr;
                }

                throw new CacheException("Failed to run reduce query locally. " + cause.getMessage(), cause);
            }
            finally {
                if (release) {
                    releaseRemoteResources(nodes, r, qryReqId, qry.distributedJoins(), mvccTracker);

                    if (!skipMergeTbl) {
                        for (int i = 0, mapQrys = mapQueries.size(); i < mapQrys; i++)
                            // Drop all merge tables.
                            fakeTable(null, i).innerTable(null);
                    }
                }
            }
        }
        finally {
            // conn is null when ownership was transferred to the result iterator.
            if (conn != null && release)
                U.close(conn, log);
        }
    }
}
Aggregations