Example 11 with QueryContext

use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.

the class H2TreeIndex method createLookupBatch.

/**
 * {@inheritDoc}
 */
@Override
public IndexLookupBatch createLookupBatch(TableFilter[] filters, int filter) {
    QueryContext qctx = H2Utils.context(filters[filter].getSession());
    if (qctx == null || qctx.distributedJoinContext() == null || !getTable().isPartitioned())
        return null;
    IndexColumn affCol = getTable().getAffinityKeyColumn();
    GridH2RowDescriptor desc = getTable().rowDescriptor();
    int affColId = -1;
    boolean ucast = false;
    if (affCol != null) {
        affColId = affCol.column.getColumnId();
        int[] masks = filters[filter].getMasks();
        if (masks != null) {
            ucast = (masks[affColId] & IndexCondition.EQUALITY) != 0 || desc.checkKeyIndexCondition(masks, IndexCondition.EQUALITY);
        }
    }
    return new DistributedLookupBatch(this, cctx, ucast, affColId);
}
Also used : DistributedLookupBatch(org.apache.ignite.internal.processors.query.h2.opt.join.DistributedLookupBatch) GridH2RowDescriptor(org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor) IndexQueryContext(org.apache.ignite.internal.cache.query.index.sorted.inline.IndexQueryContext) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) IndexColumn(org.h2.table.IndexColumn)
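
The ucast flag above decides whether the distributed lookup batch can be routed to a single node (the join filter fixes the affinity key with an equality condition) or has to be broadcast to all partition owners. A small hypothetical helper restating that decision, using only the calls already shown in the method (the helper itself is not part of the Ignite sources):

// Hypothetical helper: true when the join filter pins the affinity column or the
// cache key with an equality condition, so the batched lookup can be sent to the
// single node owning that key instead of being broadcast.
private boolean isUnicastLookup(TableFilter[] filters, int filter, IndexColumn affCol, GridH2RowDescriptor desc) {
    if (affCol == null)
        return false;

    int[] masks = filters[filter].getMasks();

    if (masks == null)
        return false;

    int affColId = affCol.column.getColumnId();

    return (masks[affColId] & IndexCondition.EQUALITY) != 0
        || desc.checkKeyIndexCondition(masks, IndexCondition.EQUALITY);
}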

Example 12 with QueryContext

use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.

the class H2TreeIndex method find.

/**
 * {@inheritDoc}
 */
@Override
public Cursor find(Session ses, SearchRow lower, SearchRow upper) {
    assert lower == null || lower instanceof H2Row : lower;
    assert upper == null || upper instanceof H2Row : upper;
    try {
        T2<IndexRow, IndexRow> key = prepareIndexKeys(lower, upper);
        QueryContext qctx = ses != null ? H2Utils.context(ses) : null;
        GridCursor<IndexRow> cursor = queryIndex.find(key.get1(), key.get2(), true, true, segment(qctx), idxQryContext(qctx));
        GridCursor<H2Row> h2cursor = new IndexValueCursor<>(cursor, this::mapIndexRow);
        return new H2Cursor(h2cursor);
    } catch (IgniteCheckedException e) {
        throw DbException.convert(e);
    }
}
Also used : IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IndexRow(org.apache.ignite.internal.cache.query.index.sorted.IndexRow) H2Cursor(org.apache.ignite.internal.processors.query.h2.H2Cursor) IndexQueryContext(org.apache.ignite.internal.cache.query.index.sorted.inline.IndexQueryContext) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) H2Row(org.apache.ignite.internal.processors.query.h2.opt.H2Row) IndexValueCursor(org.apache.ignite.internal.cache.query.index.sorted.IndexValueCursor)
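
The Cursor returned here follows the standard H2 cursor contract (next()/get()); a minimal sketch of how it is typically consumed, assuming the index, session and search-row bounds are given (the loop itself is illustrative, not from the Ignite sources):

// Illustrative consumption of the cursor returned by find(): H2 advances the
// cursor and reads one matching Row at a time. Each Row is backed by an H2Row
// produced through mapIndexRow above. "index", "ses", "lower" and "upper" are
// assumed to be available in the surrounding code.
Cursor cur = index.find(ses, lower, upper);

while (cur.next()) {
    Row row = cur.get();
    // ... process row ...
}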

Example 13 with QueryContext

use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.

the class H2Utils method setupConnection.

/**
 * @param conn Connection to use.
 * @param qctx Query context.
 * @param distributedJoins If distributed joins are enabled.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param lazy Lazy query execution mode.
 */
public static void setupConnection(H2PooledConnection conn, QueryContext qctx, boolean distributedJoins, boolean enforceJoinOrder, boolean lazy) {
    Session s = session(conn);
    s.setForceJoinOrder(enforceJoinOrder);
    s.setJoinBatchEnabled(distributedJoins);
    s.setLazyQueryExecution(lazy);
    QueryContext oldCtx = (QueryContext) s.getVariable(QCTX_VARIABLE_NAME).getObject();
    assert oldCtx == null || oldCtx == qctx : oldCtx;
    s.setVariable(QCTX_VARIABLE_NAME, new ValueRuntimeSimpleObject<>(qctx));
    // Hack with a thread-local context is used only for H2 methods that are called without a Session object,
    // e.g. GridH2Table.getRowCountApproximation (used only during the optimization phase, after parsing).
    QueryContext.threadLocal(qctx);
}
Also used : QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) Session(org.h2.engine.Session)
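
The context attached here is read back later either through the Session variable (H2Utils.context(Session), as in the H2TreeIndex examples above) or through the thread local for Session-less H2 callbacks. A minimal sketch of that round trip, assuming a pooled connection and the same local QueryContext that Example 14 builds (the flag values are illustrative):

// Build a local query context (constructor arguments as in Example 14: segment 0,
// no filter, no distributed join context, no MVCC snapshot, no cancel hook, local = true).
QueryContext qctx = new QueryContext(0, null, null, null, null, true);

// Attach it to the connection's Session and to the thread local. The flag values
// below are illustrative; Example 14 passes the caller's enforceJoinOrder flag.
H2Utils.setupConnection(conn, qctx, /* distributedJoins */ false, /* enforceJoinOrder */ true, /* lazy */ false);

// From this point on, index code with access to the Session reads the context back
// via H2Utils.context(session) (see the H2TreeIndex examples above), while
// Session-less H2 callbacks such as GridH2Table.getRowCountApproximation rely on
// the thread-local copy installed by setupConnection.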

Example 14 with QueryContext

use of org.apache.ignite.internal.processors.query.h2.opt.QueryContext in project ignite by apache.

the class GridReduceQueryExecutor method query.

/**
 * @param qryId Query ID.
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @param mvccTracker Query tracker.
 * @param dataPageScanEnabled If data page scan is enabled.
 * @param pageSize Page size.
 * @return Rows iterator.
 */
@SuppressWarnings("IfMayBeConditional")
public Iterator<List<?>> query(
    long qryId,
    String schemaName,
    final GridCacheTwoStepQuery qry,
    boolean keepBinary,
    boolean enforceJoinOrder,
    int timeoutMillis,
    GridQueryCancel cancel,
    Object[] params,
    int[] parts,
    boolean lazy,
    MvccQueryTracker mvccTracker,
    Boolean dataPageScanEnabled,
    int pageSize
) {
    assert !qry.mvccEnabled() || mvccTracker != null;
    if (pageSize <= 0)
        pageSize = Query.DFLT_PAGE_SIZE;
    // If explicit partitions are set, but there are no real tables, ignore.
    if (!qry.hasCacheIds() && parts != null)
        parts = null;
    // Partitions are not supported for queries over all replicated caches.
    if (parts != null && qry.isReplicatedOnly())
        throw new CacheException("Partitions are not supported for replicated caches");
    try {
        if (qry.mvccEnabled())
            checkActive(tx(ctx));
    } catch (IgniteTxAlreadyCompletedCheckedException e) {
        throw new TransactionAlreadyCompletedException(e.getMessage(), e);
    }
    final boolean singlePartMode = parts != null && parts.length == 1;
    if (F.isEmpty(params))
        params = EMPTY_PARAMS;
    List<Integer> cacheIds = qry.cacheIds();
    List<GridCacheSqlQuery> mapQueries = prepareMapQueries(qry, params, singlePartMode);
    final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable() || singlePartMode;
    final long retryTimeout = retryTimeout(timeoutMillis);
    final long qryStartTime = U.currentTimeMillis();
    ReduceQueryRun lastRun = null;
    for (int attempt = 0; ; attempt++) {
        ensureQueryNotCancelled(cancel);
        if (attempt > 0) {
            throttleOnRetry(lastRun, qryStartTime, retryTimeout, attempt);
            ensureQueryNotCancelled(cancel);
        }
        AffinityTopologyVersion topVer = h2.readyTopologyVersion();
        // Check if topology has changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " + "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }
        ReducePartitionMapResult mapping = createMapping(qry, parts, cacheIds, topVer);
        if (mapping == null)
            continue; // Can't map query. Retry.
        final Collection<ClusterNode> nodes = mapping.nodes();
        final Map<ClusterNode, Integer> nodeToSegmentsCnt = createNodeToSegmentsCountMapping(qry, mapping);
        assert !F.isEmpty(nodes);
        H2PooledConnection conn = h2.connections().connection(schemaName);
        final long qryReqId = qryReqIdGen.incrementAndGet();
        h2.runningQueryManager().trackRequestId(qryReqId);
        boolean release = true;
        try {
            final ReduceQueryRun r = createReduceQueryRun(conn, mapQueries, nodes, pageSize, nodeToSegmentsCnt, skipMergeTbl, qry.explain(), dataPageScanEnabled);
            runs.put(qryReqId, r);
            try {
                cancel.add(() -> send(nodes, new GridQueryCancelRequest(qryReqId), null, true));
                GridH2QueryRequest req = new GridH2QueryRequest()
                    .queryId(qryId)
                    .requestId(qryReqId)
                    .topologyVersion(topVer)
                    .pageSize(pageSize)
                    .caches(qry.cacheIds())
                    .tables(qry.distributedJoins() ? qry.tables() : null)
                    .partitions(convert(mapping.partitionsMap()))
                    .queries(mapQueries)
                    .parameters(params)
                    .flags(queryFlags(qry, enforceJoinOrder, lazy, dataPageScanEnabled))
                    .timeout(timeoutMillis)
                    .explicitTimeout(true)
                    .schemaName(schemaName);
                if (mvccTracker != null)
                    req.mvccSnapshot(mvccTracker.snapshot());
                final C2<ClusterNode, Message, Message> spec = parts == null ? null : new ReducePartitionsSpecializer(mapping.queryPartitionsMap());
                boolean retry = false;
                if (send(nodes, req, spec, false)) {
                    awaitAllReplies(r, nodes, cancel);
                    if (r.hasErrorOrRetry()) {
                        CacheException err = r.exception();
                        if (err != null) {
                            if (err.getCause() instanceof IgniteClientDisconnectedException)
                                throw err;
                            else if (QueryUtils.wasCancelled(err))
                                // Throw correct exception.
                                throw new QueryCancelledException();
                            throw err;
                        }
                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion(r.retryTopologyVersion());
                        retry = true;
                    }
                } else
                    retry = true;
                if (retry) {
                    lastRun = runs.get(qryReqId);
                    assert lastRun != null;
                    // Retry.
                    continue;
                }
                Iterator<List<?>> resIter;
                if (skipMergeTbl) {
                    resIter = new ReduceIndexIterator(this, nodes, r, qryReqId, qry.distributedJoins(), mvccTracker, ctx.tracing());
                    release = false;
                    U.close(conn, log);
                } else {
                    ensureQueryNotCancelled(cancel);
                    QueryContext qctx = new QueryContext(0, null, null, null, null, true);
                    H2Utils.setupConnection(conn, qctx, false, enforceJoinOrder);
                    if (qry.explain())
                        return explainPlan(conn, qry, params);
                    GridCacheSqlQuery rdc = qry.reduceQuery();
                    final PreparedStatement stmt = conn.prepareStatementNoCache(rdc.query());
                    H2Utils.bindParameters(stmt, F.asList(rdc.parameters(params)));
                    ReduceH2QueryInfo qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(), ctx.localNodeId(), qryId, qryReqId);
                    ResultSet res = h2.executeSqlQueryWithTimer(stmt, conn, rdc.query(), timeoutMillis, cancel, dataPageScanEnabled, qryInfo);
                    resIter = new H2FieldsIterator(res, mvccTracker, conn, r.pageSize(), log, h2, qryInfo, ctx.tracing());
                    // Null out the connection and tracker so that the finally blocks
                    // below do not close the connection or fire the MVCC callback.
                    conn = null;
                    mvccTracker = null;
                }
                return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
            } catch (IgniteCheckedException | RuntimeException e) {
                release = true;
                if (e instanceof CacheException) {
                    if (QueryUtils.wasCancelled(e))
                        throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
                    throw (CacheException) e;
                }
                Throwable cause = e;
                if (e instanceof IgniteCheckedException) {
                    Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
                    if (disconnectedErr != null)
                        cause = disconnectedErr;
                }
                throw new CacheException("Failed to run reduce query locally. " + cause.getMessage(), cause);
            } finally {
                if (release) {
                    releaseRemoteResources(nodes, r, qryReqId, qry.distributedJoins(), mvccTracker);
                    if (!skipMergeTbl) {
                        // Drop all merge tables.
                        for (int i = 0, mapQrys = mapQueries.size(); i < mapQrys; i++)
                            fakeTable(null, i).innerTable(null);
                    }
                }
            }
        } finally {
            if (conn != null && release)
                U.close(conn, log);
        }
    }
}
Also used : GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) Message(org.apache.ignite.plugin.extensions.communication.Message) CacheException(javax.cache.CacheException) H2FieldsIterator(org.apache.ignite.internal.processors.query.h2.H2FieldsIterator) IgniteTxAlreadyCompletedCheckedException(org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException) GridQueryCacheObjectsIterator(org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator) TransactionException(org.apache.ignite.transactions.TransactionException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) ResultSet(java.sql.ResultSet) Collections.singletonList(java.util.Collections.singletonList) List(java.util.List) ArrayList(java.util.ArrayList) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) ClusterNode(org.apache.ignite.cluster.ClusterNode) H2PooledConnection(org.apache.ignite.internal.processors.query.h2.H2PooledConnection) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) GridH2QueryRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest) ReduceH2QueryInfo(org.apache.ignite.internal.processors.query.h2.ReduceH2QueryInfo) PreparedStatement(java.sql.PreparedStatement) TransactionAlreadyCompletedException(org.apache.ignite.transactions.TransactionAlreadyCompletedException) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException)
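
Because the method is long, a condensed outline of its retry loop may help. The hypothetical helper below mirrors the names used above and elides the send/reduce details as comments; it is a reading aid, not code from the Ignite sources:

// Illustrative outline of the retry loop in query() above.
private Iterator<List<?>> runWithRetries(GridQueryCancel cancel, long qryStartTime, long retryTimeout) {
    ReduceQueryRun lastRun = null;

    for (int attempt = 0; ; attempt++) {
        ensureQueryNotCancelled(cancel);

        if (attempt > 0) {
            // Back off before retrying and re-check cancellation.
            throttleOnRetry(lastRun, qryStartTime, retryTimeout, attempt);
            ensureQueryNotCancelled(cancel);
        }

        // 1. Map the query onto the current ready topology; if mapping fails, retry.
        // 2. Send map queries to the mapped nodes and await their replies.
        // 3. On an error reply: rethrow; on a retry reply: await the new topology version,
        //    remember the run as lastRun and retry.
        // 4. Otherwise execute the reduce query locally (or return the merge-table
        //    iterator) and hand back the merged rows iterator.
    }
}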

Aggregations

QueryContext (org.apache.ignite.internal.processors.query.h2.opt.QueryContext): 14
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 7
ArrayList (java.util.ArrayList): 5
IndexQueryContext (org.apache.ignite.internal.cache.query.index.sorted.inline.IndexQueryContext): 5
PreparedStatement (java.sql.PreparedStatement): 4
MvccSnapshot (org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot): 4
TraceSurroundings (org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings): 4
ResultSet (java.sql.ResultSet): 3
SQLException (java.sql.SQLException): 3
QueryCancelledException (org.apache.ignite.cache.query.QueryCancelledException): 3
Collections.singletonList (java.util.Collections.singletonList): 2
List (java.util.List): 2
CacheException (javax.cache.CacheException): 2
IndexRow (org.apache.ignite.internal.cache.query.index.sorted.IndexRow): 2
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 2
KeyCacheObject (org.apache.ignite.internal.processors.cache.KeyCacheObject): 2
CacheDataRow (org.apache.ignite.internal.processors.cache.persistence.CacheDataRow): 2
GridCacheSqlQuery (org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery): 2
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 2
H2Cursor (org.apache.ignite.internal.processors.query.h2.H2Cursor): 2