
Example 11 with H2PooledConnection

Use of org.apache.ignite.internal.processors.query.h2.H2PooledConnection in project ignite by apache.

From the class GridReduceQueryExecutor, method query.

/**
 * @param qryId Query ID.
 * @param schemaName Schema name.
 * @param qry Query.
 * @param keepBinary Keep binary.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param timeoutMillis Timeout in milliseconds.
 * @param cancel Query cancel.
 * @param params Query parameters.
 * @param parts Partitions.
 * @param lazy Lazy execution flag.
 * @param mvccTracker Query tracker.
 * @param dataPageScanEnabled If data page scan is enabled.
 * @param pageSize Page size.
 * @return Rows iterator.
 */
@SuppressWarnings("IfMayBeConditional")
public Iterator<List<?>> query(long qryId, String schemaName, final GridCacheTwoStepQuery qry, boolean keepBinary, boolean enforceJoinOrder, int timeoutMillis, GridQueryCancel cancel, Object[] params, int[] parts, boolean lazy, MvccQueryTracker mvccTracker, Boolean dataPageScanEnabled, int pageSize) {
    assert !qry.mvccEnabled() || mvccTracker != null;
    if (pageSize <= 0)
        pageSize = Query.DFLT_PAGE_SIZE;
    // If explicit partitions are set, but there are no real tables, ignore.
    if (!qry.hasCacheIds() && parts != null)
        parts = null;
    // Partitions are not supported for queries over all replicated caches.
    if (parts != null && qry.isReplicatedOnly())
        throw new CacheException("Partitions are not supported for replicated caches");
    try {
        if (qry.mvccEnabled())
            checkActive(tx(ctx));
    } catch (IgniteTxAlreadyCompletedCheckedException e) {
        throw new TransactionAlreadyCompletedException(e.getMessage(), e);
    }
    final boolean singlePartMode = parts != null && parts.length == 1;
    if (F.isEmpty(params))
        params = EMPTY_PARAMS;
    List<Integer> cacheIds = qry.cacheIds();
    List<GridCacheSqlQuery> mapQueries = prepareMapQueries(qry, params, singlePartMode);
    final boolean skipMergeTbl = !qry.explain() && qry.skipMergeTable() || singlePartMode;
    final long retryTimeout = retryTimeout(timeoutMillis);
    final long qryStartTime = U.currentTimeMillis();
    ReduceQueryRun lastRun = null;
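    // Retry loop: each attempt maps the query onto the current topology and sends the map
    // requests; if mapping fails or a node replies with a retry flag, the loop throttles and starts over.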
    for (int attempt = 0; ; attempt++) {
        ensureQueryNotCancelled(cancel);
        if (attempt > 0) {
            throttleOnRetry(lastRun, qryStartTime, retryTimeout, attempt);
            ensureQueryNotCancelled(cancel);
        }
        AffinityTopologyVersion topVer = h2.readyTopologyVersion();
        // Check if topology has changed while retrying on locked topology.
        if (h2.serverTopologyChanged(topVer) && ctx.cache().context().lockedTopologyVersion(null) != null) {
            throw new CacheException(new TransactionException("Server topology is changed during query " + "execution inside a transaction. It's recommended to rollback and retry transaction."));
        }
        ReducePartitionMapResult mapping = createMapping(qry, parts, cacheIds, topVer);
        if (mapping == null)
            continue; // Can't map query: retry.
        final Collection<ClusterNode> nodes = mapping.nodes();
        final Map<ClusterNode, Integer> nodeToSegmentsCnt = createNodeToSegmentsCountMapping(qry, mapping);
        assert !F.isEmpty(nodes);
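        // Borrow a pooled H2 connection for this schema; it is released in the finally blocks
        // below unless ownership is handed over to the result iterator.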
        H2PooledConnection conn = h2.connections().connection(schemaName);
        final long qryReqId = qryReqIdGen.incrementAndGet();
        h2.runningQueryManager().trackRequestId(qryReqId);
        boolean release = true;
        try {
            final ReduceQueryRun r = createReduceQueryRun(conn, mapQueries, nodes, pageSize, nodeToSegmentsCnt, skipMergeTbl, qry.explain(), dataPageScanEnabled);
            runs.put(qryReqId, r);
            try {
                cancel.add(() -> send(nodes, new GridQueryCancelRequest(qryReqId), null, true));
                GridH2QueryRequest req = new GridH2QueryRequest()
                    .queryId(qryId)
                    .requestId(qryReqId)
                    .topologyVersion(topVer)
                    .pageSize(pageSize)
                    .caches(qry.cacheIds())
                    .tables(qry.distributedJoins() ? qry.tables() : null)
                    .partitions(convert(mapping.partitionsMap()))
                    .queries(mapQueries)
                    .parameters(params)
                    .flags(queryFlags(qry, enforceJoinOrder, lazy, dataPageScanEnabled))
                    .timeout(timeoutMillis)
                    .explicitTimeout(true)
                    .schemaName(schemaName);
                if (mvccTracker != null)
                    req.mvccSnapshot(mvccTracker.snapshot());
                final C2<ClusterNode, Message, Message> spec = parts == null ? null : new ReducePartitionsSpecializer(mapping.queryPartitionsMap());
                boolean retry = false;
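                // Send the map query request to every mapped node and wait for all replies;
                // errors and retry requests from the map nodes are handled below.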
                if (send(nodes, req, spec, false)) {
                    awaitAllReplies(r, nodes, cancel);
                    if (r.hasErrorOrRetry()) {
                        CacheException err = r.exception();
                        if (err != null) {
                            if (err.getCause() instanceof IgniteClientDisconnectedException)
                                throw err;
                            else if (QueryUtils.wasCancelled(err))
                                // Throw correct exception.
                                throw new QueryCancelledException();
                            throw err;
                        }
                        // If remote node asks us to retry then we have outdated full partition map.
                        h2.awaitForReadyTopologyVersion(r.retryTopologyVersion());
                        retry = true;
                    }
                } else
                    retry = true;
                if (retry) {
                    lastRun = runs.get(qryReqId);
                    assert lastRun != null;
                    // Retry.
                    continue;
                }
                Iterator<List<?>> resIter;
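                // Build the result iterator: either read the map results directly (no merge table)
                // or run the reduce query over the merge tables on the borrowed connection.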
                if (skipMergeTbl) {
                    resIter = new ReduceIndexIterator(this, nodes, r, qryReqId, qry.distributedJoins(), mvccTracker, ctx.tracing());
                    release = false;
                    U.close(conn, log);
                } else {
                    ensureQueryNotCancelled(cancel);
                    QueryContext qctx = new QueryContext(0, null, null, null, null, true);
                    H2Utils.setupConnection(conn, qctx, false, enforceJoinOrder);
                    if (qry.explain())
                        return explainPlan(conn, qry, params);
                    GridCacheSqlQuery rdc = qry.reduceQuery();
                    final PreparedStatement stmt = conn.prepareStatementNoCache(rdc.query());
                    H2Utils.bindParameters(stmt, F.asList(rdc.parameters(params)));
                    ReduceH2QueryInfo qryInfo = new ReduceH2QueryInfo(stmt, qry.originalSql(), ctx.localNodeId(), qryId, qryReqId);
                    ResultSet res = h2.executeSqlQueryWithTimer(stmt, conn, rdc.query(), timeoutMillis, cancel, dataPageScanEnabled, qryInfo);
                    resIter = new H2FieldsIterator(res, mvccTracker, conn, r.pageSize(), log, h2, qryInfo, ctx.tracing());
                    // Ownership of the connection and the MVCC tracker has passed to the
                    // iterator; null them out so they are not released in the finally blocks below.
                    conn = null;
                    mvccTracker = null;
                }
                return new GridQueryCacheObjectsIterator(resIter, h2.objectContext(), keepBinary);
            } catch (IgniteCheckedException | RuntimeException e) {
                release = true;
                if (e instanceof CacheException) {
                    if (QueryUtils.wasCancelled(e))
                        throw new CacheException("Failed to run reduce query locally.", new QueryCancelledException());
                    throw (CacheException) e;
                }
                Throwable cause = e;
                if (e instanceof IgniteCheckedException) {
                    Throwable disconnectedErr = ((IgniteCheckedException) e).getCause(IgniteClientDisconnectedException.class);
                    if (disconnectedErr != null)
                        cause = disconnectedErr;
                }
                throw new CacheException("Failed to run reduce query locally. " + cause.getMessage(), cause);
            } finally {
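                // Unless ownership of the results has passed to the iterator (release == false),
                // release remote resources and drop the local merge tables.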
                if (release) {
                    releaseRemoteResources(nodes, r, qryReqId, qry.distributedJoins(), mvccTracker);
                    if (!skipMergeTbl) {
                        // Drop all merge tables.
                        for (int i = 0, mapQrys = mapQueries.size(); i < mapQrys; i++)
                            fakeTable(null, i).innerTable(null);
                    }
                }
            }
        } finally {
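            // Return the connection to the pool unless it was already closed or handed over to the iterator.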
            if (conn != null && release)
                U.close(conn, log);
        }
    }
}
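
The part that matters for H2PooledConnection is the ownership hand-off: the reducer borrows a connection from the per-schema pool and either closes it itself (U.close in the finally blocks) or sets conn to null once the H2FieldsIterator takes over, so the connection is released only when the client finishes reading the results. A minimal sketch of that pattern follows; the runReduce helper, the wrapResultSet call and the reduceSql text are hypothetical placeholders, while h2, log, prepareStatementNoCache and U.close come from the example above.

// Sketch only: borrow/hand-off pattern for H2PooledConnection, distilled from the method above.
private Iterator<List<?>> runReduce(String schemaName, String reduceSql) throws Exception {
    H2PooledConnection conn = h2.connections().connection(schemaName);
    boolean release = true;
    try {
        PreparedStatement stmt = conn.prepareStatementNoCache(reduceSql);
        ResultSet rs = stmt.executeQuery();
        // Hand the connection over to the iterator, which closes it once it is fully read.
        Iterator<List<?>> it = wrapResultSet(rs, conn); // hypothetical wrapper, analogous to H2FieldsIterator
        release = false;
        conn = null;
        return it;
    } finally {
        if (release && conn != null)
            U.close(conn, log); // on any failure, return the connection to the pool
    }
}
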
Also used : GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) Message(org.apache.ignite.plugin.extensions.communication.Message) CacheException(javax.cache.CacheException) H2FieldsIterator(org.apache.ignite.internal.processors.query.h2.H2FieldsIterator) IgniteTxAlreadyCompletedCheckedException(org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException) GridQueryCacheObjectsIterator(org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator) TransactionException(org.apache.ignite.transactions.TransactionException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) ResultSet(java.sql.ResultSet) Collections.singletonList(java.util.Collections.singletonList) List(java.util.List) ArrayList(java.util.ArrayList) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) ClusterNode(org.apache.ignite.cluster.ClusterNode) H2PooledConnection(org.apache.ignite.internal.processors.query.h2.H2PooledConnection) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) GridH2QueryRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest) ReduceH2QueryInfo(org.apache.ignite.internal.processors.query.h2.ReduceH2QueryInfo) PreparedStatement(java.sql.PreparedStatement) TransactionAlreadyCompletedException(org.apache.ignite.transactions.TransactionAlreadyCompletedException) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException)
