Search in sources:

Example 1 with GridCacheTwoStepQuery

Use of org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery in project ignite by apache.

From the class IgniteH2Indexing, method queryDistributedSqlFields.

/** {@inheritDoc} */
@Override
public FieldsQueryCursor<List<?>> queryDistributedSqlFields(String schemaName, SqlFieldsQuery qry, boolean keepBinary, GridQueryCancel cancel, @Nullable Integer mainCacheId) {
    final String sqlQry = qry.getSql();
    Connection c = connectionForSchema(schemaName);
    final boolean enforceJoinOrder = qry.isEnforceJoinOrder();
    final boolean distributedJoins = qry.isDistributedJoins();
    final boolean grpByCollocated = qry.isCollocated();
    final DistributedJoinMode distributedJoinMode = distributedJoinMode(qry.isLocal(), distributedJoins);
    GridCacheTwoStepQuery twoStepQry = null;
    List<GridQueryFieldMetadata> meta;
    final H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, sqlQry, grpByCollocated, distributedJoins, enforceJoinOrder, qry.isLocal());
    H2TwoStepCachedQuery cachedQry = twoStepCache.get(cachedQryKey);
    if (cachedQry != null) {
        twoStepQry = cachedQry.query().copy();
        meta = cachedQry.meta();
    } else {
        final UUID locNodeId = ctx.localNodeId();
        // Here we will just parse the statement, no need to optimize it at all.
        H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/true);
        GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE).distributedJoinMode(distributedJoinMode));
        PreparedStatement stmt = null;
        Prepared prepared;
        boolean cachesCreated = false;
        try {
            try {
                while (true) {
                    try {
                        // Do not cache this statement because the whole query object will be cached later on.
                        stmt = prepareStatement(c, sqlQry, false);
                        break;
                    } catch (SQLException e) {
                        if (!cachesCreated && (e.getErrorCode() == ErrorCode.SCHEMA_NOT_FOUND_1 || e.getErrorCode() == ErrorCode.TABLE_OR_VIEW_NOT_FOUND_1 || e.getErrorCode() == ErrorCode.INDEX_NOT_FOUND_1)) {
                            try {
                                ctx.cache().createMissingQueryCaches();
                            } catch (IgniteCheckedException ignored) {
                                throw new CacheException("Failed to create missing caches.", e);
                            }
                            cachesCreated = true;
                        } else
                            throw new IgniteSQLException("Failed to parse query: " + sqlQry, IgniteQueryErrorCode.PARSING, e);
                    }
                }
                prepared = GridSqlQueryParser.prepared(stmt);
                if (qry instanceof JdbcSqlFieldsQuery && ((JdbcSqlFieldsQuery) qry).isQuery() != prepared.isQuery())
                    throw new IgniteSQLException("Given statement type does not match that declared by JDBC driver", IgniteQueryErrorCode.STMT_TYPE_MISMATCH);
                if (prepared.isQuery()) {
                    bindParameters(stmt, F.asList(qry.getArgs()));
                    twoStepQry = GridSqlQuerySplitter.split((JdbcPreparedStatement) stmt, qry.getArgs(), grpByCollocated, distributedJoins, enforceJoinOrder, this);
                    assert twoStepQry != null;
                }
            } finally {
                GridH2QueryContext.clearThreadLocal();
            }
            // It is a DML statement if we did not create a twoStepQuery.
            if (twoStepQry == null) {
                if (DmlStatementsProcessor.isDmlStatement(prepared)) {
                    try {
                        return dmlProc.updateSqlFieldsDistributed(schemaName, stmt, qry, cancel);
                    } catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DML statement [stmt=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
                    }
                }
                if (DdlStatementsProcessor.isDdlStatement(prepared)) {
                    try {
                        return ddlProc.runDdlStatement(sqlQry, stmt);
                    } catch (IgniteCheckedException e) {
                        throw new IgniteSQLException("Failed to execute DDL statement [stmt=" + sqlQry + ']', e);
                    }
                }
            }
            LinkedHashSet<Integer> caches0 = new LinkedHashSet<>();
            assert twoStepQry != null;
            int tblCnt = twoStepQry.tablesCount();
            if (mainCacheId != null)
                caches0.add(mainCacheId);
            if (tblCnt > 0) {
                for (QueryTable tblKey : twoStepQry.tables()) {
                    GridH2Table tbl = dataTable(tblKey);
                    int cacheId = CU.cacheId(tbl.cacheName());
                    caches0.add(cacheId);
                }
            }
            if (caches0.isEmpty())
                twoStepQry.local(true);
            else {
                // Prohibit using indexes with different numbers of segments in the same query.
                List<Integer> cacheIds = new ArrayList<>(caches0);
                checkCacheIndexSegmentation(cacheIds);
                twoStepQry.cacheIds(cacheIds);
                twoStepQry.local(qry.isLocal());
            }
            meta = H2Utils.meta(stmt.getMetaData());
        } catch (IgniteCheckedException e) {
            throw new CacheException("Failed to bind parameters: [qry=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
        } catch (SQLException e) {
            throw new IgniteSQLException(e);
        } finally {
            U.close(stmt, log);
        }
    }
    if (log.isDebugEnabled())
        log.debug("Parsed query: `" + sqlQry + "` into two step query: " + twoStepQry);
    twoStepQry.pageSize(qry.getPageSize());
    if (cancel == null)
        cancel = new GridQueryCancel();
    int[] partitions = qry.getPartitions();
    if (partitions == null && twoStepQry.derivedPartitions() != null) {
        try {
            partitions = calculateQueryPartitions(twoStepQry.derivedPartitions(), qry.getArgs());
        } catch (IgniteCheckedException e) {
            throw new CacheException("Failed to calculate derived partitions: [qry=" + sqlQry + ", params=" + Arrays.deepToString(qry.getArgs()) + "]", e);
        }
    }
    QueryCursorImpl<List<?>> cursor = new QueryCursorImpl<>(runQueryTwoStep(schemaName, twoStepQry, keepBinary, enforceJoinOrder, qry.getTimeout(), cancel, qry.getArgs(), partitions), cancel);
    cursor.fieldsMeta(meta);
    if (cachedQry == null && !twoStepQry.explain()) {
        cachedQry = new H2TwoStepCachedQuery(meta, twoStepQry.copy());
        twoStepCache.putIfAbsent(cachedQryKey, cachedQry);
    }
    return cursor;
}
Also used: LinkedHashSet(java.util.LinkedHashSet) SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) CacheException(javax.cache.CacheException) Prepared(org.h2.command.Prepared) ArrayList(java.util.ArrayList) GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) GridQueryFieldMetadata(org.apache.ignite.internal.processors.query.GridQueryFieldMetadata) IgniteSystemProperties.getString(org.apache.ignite.IgniteSystemProperties.getString) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) List(java.util.List) UUID(java.util.UUID) JdbcPreparedStatement(org.h2.jdbc.JdbcPreparedStatement) GridH2QueryContext(org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext) Connection(java.sql.Connection) PreparedStatement(java.sql.PreparedStatement) QueryCursorImpl(org.apache.ignite.internal.processors.cache.QueryCursorImpl) QueryTable(org.apache.ignite.internal.processors.cache.query.QueryTable) IgniteSystemProperties.getInteger(org.apache.ignite.IgniteSystemProperties.getInteger) DistributedJoinMode(org.apache.ignite.internal.processors.query.h2.opt.DistributedJoinMode) GridQueryCancel(org.apache.ignite.internal.processors.query.GridQueryCancel) JdbcSqlFieldsQuery(org.apache.ignite.internal.jdbc2.JdbcSqlFieldsQuery)
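
For orientation, the sketch below shows how this method is typically reached from the public API: the SqlFieldsQuery flags set by the caller map onto the H2TwoStepCachedQueryKey fields above, so repeated queries with identical flags reuse the cached two-step plan. This is a minimal, hypothetical caller (the "Person" cache and the Person/Organization tables are assumptions), not code from the Ignite sources.

import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class TwoStepQueryUsage {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumption: a SQL-enabled "Person" cache with Person/Organization tables already exists.
            IgniteCache<Object, Object> cache = ignite.cache("Person");

            SqlFieldsQuery qry = new SqlFieldsQuery(
                "SELECT p.name, o.name FROM Person p JOIN Organization o ON p.orgId = o.id")
                .setDistributedJoins(true)   // read as qry.isDistributedJoins() above
                .setEnforceJoinOrder(false)  // read as qry.isEnforceJoinOrder()
                .setCollocated(false)        // read as qry.isCollocated() (collocated GROUP BY)
                .setPageSize(1024);          // applied via twoStepQry.pageSize(qry.getPageSize())

            try (FieldsQueryCursor<List<?>> cur = cache.query(qry)) {
                for (List<?> row : cur)
                    System.out.println(row);
            }
        }
    }
}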

Example 2 with GridCacheTwoStepQuery

Use of org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery in project ignite by apache.

From the class GridSqlQuerySplitter, method split.

/**
     * @param stmt Prepared statement.
     * @param params Parameters.
     * @param collocatedGrpBy Whether the query has collocated GROUP BY keys.
     * @param distributedJoins If distributed joins enabled.
     * @param enforceJoinOrder Enforce join order.
     * @param h2 Indexing.
     * @return Two step query.
     * @throws SQLException If failed.
     * @throws IgniteCheckedException If failed.
     */
public static GridCacheTwoStepQuery split(JdbcPreparedStatement stmt, Object[] params, boolean collocatedGrpBy, boolean distributedJoins, boolean enforceJoinOrder, IgniteH2Indexing h2) throws SQLException, IgniteCheckedException {
    if (params == null)
        params = GridCacheSqlQuery.EMPTY_PARAMS;
    // Here we will just do initial query parsing. Do not use optimized
    // subqueries because we do not have unique FROM aliases yet.
    GridSqlQuery qry = parse(prepared(stmt), false);
    String originalSql = qry.getSQL();
    final boolean explain = qry.explain();
    qry.explain(false);
    GridSqlQuerySplitter splitter = new GridSqlQuerySplitter(params, collocatedGrpBy, h2.kernalContext());
    // Normalization will generate unique aliases for all the table filters in FROM.
    // Also it will collect all tables and schemas from the query.
    splitter.normalizeQuery(qry);
    Connection conn = stmt.getConnection();
    // Here we will have correct normalized AST with optimized join order.
    // The distributedJoins parameter is ignored because it is not relevant for
    // the REDUCE query optimization.
    qry = parse(optimize(h2, conn, qry.getSQL(), params, false, enforceJoinOrder), true);
    // Do the actual query split. We will update the original query AST, need to be careful.
    splitter.splitQuery(qry);
    // We must have at least one map query.
    assert !F.isEmpty(splitter.mapSqlQrys) : "map";
    // We must have a reduce query.
    assert splitter.rdcSqlQry != null : "rdc";
    // If distributed joins are enabled, optimize each MAP query and check whether
    // we actually need distributed joins at all.
    if (distributedJoins) {
        boolean allCollocated = true;
        for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys) {
            Prepared prepared = optimize(h2, conn, mapSqlQry.query(), mapSqlQry.parameters(params), true, enforceJoinOrder);
            allCollocated &= isCollocated((Query) prepared);
            mapSqlQry.query(parse(prepared, true).getSQL());
        }
        // We do not need distributed joins if all MAP queries are collocated.
        if (allCollocated)
            distributedJoins = false;
    }
    // Setup resulting two step query and return it.
    GridCacheTwoStepQuery twoStepQry = new GridCacheTwoStepQuery(originalSql, splitter.tbls);
    twoStepQry.reduceQuery(splitter.rdcSqlQry);
    for (GridCacheSqlQuery mapSqlQry : splitter.mapSqlQrys)
        twoStepQry.addMapQuery(mapSqlQry);
    twoStepQry.skipMergeTable(splitter.rdcQrySimple);
    twoStepQry.explain(explain);
    twoStepQry.distributedJoins(distributedJoins);
    // All map queries must have non-empty derivedPartitions to use this feature.
    twoStepQry.derivedPartitions(mergePartitionsFromMultipleQueries(twoStepQry.mapQueries()));
    return twoStepQry;
}
Also used: GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) Query(org.h2.command.dml.Query) Connection(java.sql.Connection) Prepared(org.h2.command.Prepared)
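
To make the MAP/REDUCE structure produced by split more concrete, here is a conceptual sketch of how an aggregate query could be decomposed: MAP queries compute partial aggregates on the data nodes, and the REDUCE query merges those partial results on the reducer. This is an illustration under assumptions, not literal splitter output; the actual generated SQL and the merge table name differ.

public class SplitIllustration {
    public static void main(String[] args) {
        String original =
            "SELECT orgId, AVG(salary) FROM Person GROUP BY orgId";

        // MAP side: partial aggregates computed locally on every data node.
        String mapQry =
            "SELECT orgId, SUM(salary) AS s, COUNT(salary) AS c FROM Person GROUP BY orgId";

        // REDUCE side: partial results are merged; "MERGE_TBL" stands in for the
        // intermediate merge table the reducer reads from (the name is illustrative).
        String rdcQry =
            "SELECT orgId, SUM(s) / SUM(c) FROM MERGE_TBL GROUP BY orgId";

        System.out.printf("original: %s%nmap:      %s%nreduce:   %s%n", original, mapQry, rdcQry);
    }
}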

Example 3 with GridCacheTwoStepQuery

Use of org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery in project ignite by apache.

From the class IgniteH2Indexing, method parseAndSplit.

/**
 * Parse and split query if needed, cache either two-step query or statement.
 * @param schemaName Schema name.
 * @param qry Query.
 * @param firstArg Position of the first argument of the following {@code Prepared}.
 * @return Result: prepared statement, H2 command, two-step query (if needed),
 *     metadata for two-step query (if needed), evaluated query local execution flag.
 */
private ParsingResult parseAndSplit(String schemaName, SqlFieldsQuery qry, int firstArg) {
    Connection c = connectionForSchema(schemaName);
    // For queries that are explicitly local, we rely on the flag specified in the query
    // because this parsing result will be cached and used for queries directly.
    // For other queries, we enforce join order at this stage to avoid premature optimizations
    // (and therefore longer parsing) as long as there'll be more parsing at split stage.
    boolean enforceJoinOrderOnParsing = (!qry.isLocal() || qry.isEnforceJoinOrder());
    H2Utils.setupConnection(c, /*distributedJoins*/false, /*enforceJoinOrder*/enforceJoinOrderOnParsing);
    boolean loc = qry.isLocal();
    PreparedStatement stmt = prepareStatementAndCaches(c, qry.getSql());
    if (loc && GridSqlQueryParser.checkMultipleStatements(stmt))
        throw new IgniteSQLException("Multiple statements queries are not supported for local queries");
    GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt);
    Prepared prepared = prep.prepared();
    checkQueryType(qry, prepared.isQuery());
    String remainingSql = prep.remainingSql();
    int paramsCnt = prepared.getParameters().size();
    Object[] argsOrig = qry.getArgs();
    Object[] args = null;
    if (!DmlUtils.isBatched(qry) && paramsCnt > 0) {
        if (argsOrig == null || argsOrig.length < firstArg + paramsCnt) {
            throw new IgniteException("Invalid number of query parameters. " + "Cannot find " + (argsOrig != null ? argsOrig.length + 1 - firstArg : 1) + " parameter.");
        }
        args = Arrays.copyOfRange(argsOrig, firstArg, firstArg + paramsCnt);
    }
    if (prepared.isQuery()) {
        try {
            bindParameters(stmt, F.asList(args));
        } catch (IgniteCheckedException e) {
            U.closeQuiet(stmt);
            throw new IgniteSQLException("Failed to bind parameters: [qry=" + prepared.getSQL() + ", params=" + Arrays.deepToString(args) + "]", IgniteQueryErrorCode.PARSING, e);
        }
        GridSqlQueryParser parser = null;
        if (!loc) {
            parser = new GridSqlQueryParser(false);
            GridSqlStatement parsedStmt = parser.parse(prepared);
            // Legit assertion - we have H2 query flag above.
            assert parsedStmt instanceof GridSqlQuery;
            loc = parser.isLocalQuery(qry.isReplicatedOnly());
        }
        if (loc) {
            if (parser == null) {
                parser = new GridSqlQueryParser(false);
                parser.parse(prepared);
            }
            GridCacheContext cctx = parser.getFirstPartitionedCache();
            if (cctx != null && cctx.config().getQueryParallelism() > 1) {
                loc = false;
                qry.setDistributedJoins(true);
            }
        }
    }
    SqlFieldsQuery newQry = cloneFieldsQuery(qry).setSql(prepared.getSQL()).setArgs(args);
    boolean hasTwoStep = !loc && prepared.isQuery();
    // Let's not cache multiple statements and distributed queries as whole two step query will be cached later on.
    if (remainingSql != null || hasTwoStep)
        getStatementsCacheForCurrentThread().remove(schemaName, qry.getSql());
    if (!hasTwoStep)
        return new ParsingResult(prepared, newQry, remainingSql, null, null, null);
    final UUID locNodeId = ctx.localNodeId();
    // Now we're sure to have a distributed query. Let's try to get a two-step plan from the cache, or perform the
    // split if needed.
    H2TwoStepCachedQueryKey cachedQryKey = new H2TwoStepCachedQueryKey(schemaName, qry.getSql(), qry.isCollocated(), qry.isDistributedJoins(), qry.isEnforceJoinOrder(), qry.isLocal());
    H2TwoStepCachedQuery cachedQry;
    if ((cachedQry = twoStepCache.get(cachedQryKey)) != null) {
        checkQueryType(qry, true);
        GridCacheTwoStepQuery twoStepQry = cachedQry.query().copy();
        List<GridQueryFieldMetadata> meta = cachedQry.meta();
        return new ParsingResult(prepared, newQry, remainingSql, twoStepQry, cachedQryKey, meta);
    }
    try {
        GridH2QueryContext.set(new GridH2QueryContext(locNodeId, locNodeId, 0, PREPARE).distributedJoinMode(distributedJoinMode(qry.isLocal(), qry.isDistributedJoins())));
        try {
            return new ParsingResult(prepared, newQry, remainingSql, split(prepared, newQry), cachedQryKey, H2Utils.meta(stmt.getMetaData()));
        } catch (IgniteCheckedException e) {
            throw new IgniteSQLException("Failed to bind parameters: [qry=" + newQry.getSql() + ", params=" + Arrays.deepToString(newQry.getArgs()) + "]", IgniteQueryErrorCode.PARSING, e);
        } catch (SQLException e) {
            throw new IgniteSQLException(e);
        } finally {
            U.close(stmt, log);
        }
    } finally {
        GridH2QueryContext.clearThreadLocal();
    }
}
Also used: SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridSqlStatement(org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement) Prepared(org.h2.command.Prepared) GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) GridQueryFieldMetadata(org.apache.ignite.internal.processors.query.GridQueryFieldMetadata) IgniteSystemProperties.getString(org.apache.ignite.IgniteSystemProperties.getString) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) UUID(java.util.UUID) GridH2QueryContext(org.apache.ignite.internal.processors.query.h2.opt.GridH2QueryContext) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) Connection(java.sql.Connection) PreparedStatement(java.sql.PreparedStatement) SqlFieldsQuery(org.apache.ignite.cache.query.SqlFieldsQuery) GridSqlQuery(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery) GridSqlQueryParser(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser)
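
The caller-side sketch below contrasts the two paths parseAndSplit distinguishes: a local query that skips the two-step machinery, and a distributed one that goes through the split and the two-step cache. It is a hedged example; the "Person" cache and its columns are assumptions.

import java.util.List;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class ParseAndSplitUsage {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Object, Object> cache = ignite.cache("Person");

            // Local query: parseAndSplit() returns a ParsingResult without a two-step query
            // (unless the first partitioned cache uses query parallelism > 1).
            List<List<?>> localRows = cache.query(
                new SqlFieldsQuery("SELECT name FROM Person WHERE age > ?")
                    .setArgs(30)          // bound via bindParameters(stmt, F.asList(args))
                    .setLocal(true))
                .getAll();

            // Distributed query: goes through the split branch and the two-step cache.
            List<List<?>> distRows = cache.query(
                new SqlFieldsQuery("SELECT name FROM Person WHERE age > ?")
                    .setArgs(30))
                .getAll();

            System.out.println(localRows.size() + " / " + distRows.size());
        }
    }
}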

Example 4 with GridCacheTwoStepQuery

Use of org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery in project ignite by apache.

From the class IgniteH2Indexing, method unregisterCache.

/**
 * {@inheritDoc}
 */
@Override
public void unregisterCache(GridCacheContext cctx, boolean rmvIdx) {
    rowCache.onCacheUnregistered(cctx);
    String cacheName = cctx.name();
    String schemaName = schema(cacheName);
    H2Schema schema = schemas.get(schemaName);
    if (schema != null) {
        mapQryExec.onCacheStop(cacheName);
        dmlProc.onCacheStop(cacheName);
        // Remove this mapping only after callback to DML proc - it needs that mapping internally
        cacheName2schema.remove(cacheName);
        // Drop tables.
        Collection<H2TableDescriptor> rmvTbls = new HashSet<>();
        for (H2TableDescriptor tbl : schema.tables()) {
            if (F.eq(tbl.cache().name(), cacheName)) {
                try {
                    tbl.table().setRemoveIndexOnDestroy(rmvIdx);
                    dropTable(tbl);
                } catch (IgniteCheckedException e) {
                    U.error(log, "Failed to drop table on cache stop (will ignore): " + tbl.fullTableName(), e);
                }
                schema.drop(tbl);
                rmvTbls.add(tbl);
            }
        }
        if (!isDefaultSchema(schemaName)) {
            synchronized (schemaMux) {
                if (schema.decrementUsageCount() == 0) {
                    schemas.remove(schemaName);
                    try {
                        dropSchema(schemaName);
                    } catch (IgniteCheckedException e) {
                        U.error(log, "Failed to drop schema on cache stop (will ignore): " + cacheName, e);
                    }
                }
            }
        }
        stmtCache.clear();
        for (H2TableDescriptor tbl : rmvTbls) {
            for (Index idx : tbl.table().getIndexes())
                idx.close(null);
        }
        int cacheId = CU.cacheId(cacheName);
        for (Iterator<Map.Entry<H2TwoStepCachedQueryKey, H2TwoStepCachedQuery>> it = twoStepCache.entrySet().iterator(); it.hasNext(); ) {
            Map.Entry<H2TwoStepCachedQueryKey, H2TwoStepCachedQuery> e = it.next();
            GridCacheTwoStepQuery qry = e.getValue().query();
            if (!F.isEmpty(qry.cacheIds()) && qry.cacheIds().contains(cacheId))
                it.remove();
        }
    }
}
Also used: GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) Index(org.h2.index.Index) H2TreeIndex(org.apache.ignite.internal.processors.query.h2.database.H2TreeIndex) IgniteSystemProperties.getString(org.apache.ignite.IgniteSystemProperties.getString) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) GridBoundedConcurrentLinkedHashMap(org.apache.ignite.internal.util.GridBoundedConcurrentLinkedHashMap) LinkedHashSet(java.util.LinkedHashSet) HashSet(java.util.HashSet)
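
A minimal sketch of the public-API operation that eventually drives unregisterCache: destroying a SQL-enabled cache drops its H2 tables and evicts cached two-step plans whose cacheIds() contain the destroyed cache. The cache name and indexed types are assumptions.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class UnregisterCacheUsage {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, String> ccfg =
                new CacheConfiguration<Integer, String>("demo")
                    .setIndexedTypes(Integer.class, String.class);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            cache.put(1, "alice");

            // Running a query populates the statement and two-step caches for this cache.
            cache.query(new SqlFieldsQuery("SELECT _key, _val FROM String")).getAll();

            // Destroying the cache reaches unregisterCache() on the SQL side, which drops
            // the table and purges matching twoStepCache entries.
            ignite.destroyCache("demo");
        }
    }
}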

Example 5 with GridCacheTwoStepQuery

Use of org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery in project ignite by apache.

From the class QueryParser, method parseH2.

/**
 * Parse and split query if needed, cache either two-step query or statement.
 *
 * @param schemaName Schema name.
 * @param qry Query.
 * @param batched Batched flag.
 * @param remainingAllowed Whether multiple statements are allowed.
 * @return Parsing result.
 */
@SuppressWarnings("IfMayBeConditional")
private QueryParserResult parseH2(String schemaName, SqlFieldsQuery qry, boolean batched, boolean remainingAllowed) {
    try (H2PooledConnection c = connMgr.connection(schemaName)) {
        // For queries that are explicitly local, we rely on the flag specified in the query
        // because this parsing result will be cached and used for queries directly.
        // For other queries, we enforce join order at this stage to avoid premature optimizations
        // (and therefore longer parsing) as long as there'll be more parsing at split stage.
        boolean enforceJoinOrderOnParsing = (!qry.isLocal() || qry.isEnforceJoinOrder());
        QueryContext qctx = QueryContext.parseContext(idx.backupFilter(null, null), qry.isLocal());
        H2Utils.setupConnection(c, qctx, false, enforceJoinOrderOnParsing, false);
        PreparedStatement stmt = null;
        try {
            stmt = c.prepareStatementNoCache(qry.getSql());
            if (qry.isLocal() && GridSqlQueryParser.checkMultipleStatements(stmt))
                throw new IgniteSQLException("Multiple statements queries are not supported for local queries.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt);
            Prepared prepared = prep.prepared();
            if (GridSqlQueryParser.isExplainUpdate(prepared))
                throw new IgniteSQLException("Explains of update queries are not supported.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            // Get remaining query and check if it is allowed.
            SqlFieldsQuery remainingQry = null;
            if (!F.isEmpty(prep.remainingSql())) {
                checkRemainingAllowed(remainingAllowed);
                remainingQry = cloneFieldsQuery(qry).setSql(prep.remainingSql());
            }
            // Prepare new query.
            SqlFieldsQuery newQry = cloneFieldsQuery(qry).setSql(prepared.getSQL());
            final int paramsCnt = prepared.getParameters().size();
            Object[] argsOrig = qry.getArgs();
            Object[] args = null;
            Object[] remainingArgs = null;
            if (!batched && paramsCnt > 0) {
                if (argsOrig == null || argsOrig.length < paramsCnt)
                    // Not enough parameters, but we will handle this later on execution phase.
                    args = argsOrig;
                else {
                    args = Arrays.copyOfRange(argsOrig, 0, paramsCnt);
                    if (paramsCnt != argsOrig.length)
                        remainingArgs = Arrays.copyOfRange(argsOrig, paramsCnt, argsOrig.length);
                }
            } else
                remainingArgs = argsOrig;
            newQry.setArgs(args);
            QueryDescriptor newQryDesc = queryDescriptor(schemaName, newQry);
            if (remainingQry != null)
                remainingQry.setArgs(remainingArgs);
            final List<JdbcParameterMeta> paramsMeta;
            try {
                paramsMeta = H2Utils.parametersMeta(stmt.getParameterMetaData());
                assert prepared.getParameters().size() == paramsMeta.size();
            } catch (IgniteCheckedException | SQLException e) {
                throw new IgniteSQLException("Failed to get parameters metadata", IgniteQueryErrorCode.UNKNOWN, e);
            }
            // Do actual parsing.
            if (CommandProcessor.isCommand(prepared)) {
                GridSqlStatement cmdH2 = new GridSqlQueryParser(false, log).parse(prepared);
                QueryParserResultCommand cmd = new QueryParserResultCommand(null, cmdH2, false);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, null, cmd);
            } else if (CommandProcessor.isCommandNoOp(prepared)) {
                QueryParserResultCommand cmd = new QueryParserResultCommand(null, null, true);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, null, cmd);
            } else if (GridSqlQueryParser.isDml(prepared)) {
                QueryParserResultDml dml = prepareDmlStatement(newQryDesc, prepared);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, dml, null);
            } else if (!prepared.isQuery()) {
                throw new IgniteSQLException("Unsupported statement: " + newQry.getSql(), IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            }
            // Parse SELECT.
            GridSqlQueryParser parser = new GridSqlQueryParser(false, log);
            GridSqlQuery selectStmt = (GridSqlQuery) parser.parse(prepared);
            List<Integer> cacheIds = parser.cacheIds();
            Integer mvccCacheId = mvccCacheIdForSelect(parser.objectsMap());
            // Calculate whether the query can in fact be executed locally.
            boolean loc = qry.isLocal();
            if (!loc) {
                if (parser.isLocalQuery())
                    loc = true;
            }
            // If this is a local query, check if it must be split.
            boolean locSplit = false;
            if (loc) {
                GridCacheContext cctx = parser.getFirstPartitionedCache();
                if (cctx != null && cctx.config().getQueryParallelism() > 1)
                    locSplit = true;
            }
            // Split is required either if the query is distributed, or if it is local but executed
            // over a segmented PARTITIONED cache. In that case multiple map queries are executed against local
            // node stripes in parallel and then merged through the reduce process.
            boolean splitNeeded = !loc || locSplit;
            String forUpdateQryOutTx = null;
            String forUpdateQryTx = null;
            GridCacheTwoStepQuery forUpdateTwoStepQry = null;
            boolean forUpdate = GridSqlQueryParser.isForUpdateQuery(prepared);
            // For SELECT FOR UPDATE we need to append the key column to be able to lock selected rows further.
            if (forUpdate) {
                // We have checked above that it's not a UNION query, so it has to be a SELECT.
                assert selectStmt instanceof GridSqlSelect;
                // Check FOR UPDATE invariants: only one table, MVCC is there.
                if (cacheIds.size() != 1)
                    throw new IgniteSQLException("SELECT FOR UPDATE is supported only for queries " + "that involve single transactional cache.");
                if (mvccCacheId == null)
                    throw new IgniteSQLException("SELECT FOR UPDATE query requires transactional cache " + "with MVCC enabled.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                // We need a copy because we are going to modify AST a bit. We do not want to modify original select.
                GridSqlSelect selForUpdate = ((GridSqlSelect) selectStmt).copySelectForUpdate();
                // Clear forUpdate flag to run it as a plain query.
                selForUpdate.forUpdate(false);
                ((GridSqlSelect) selectStmt).forUpdate(false);
                // Remember sql string without FOR UPDATE clause.
                forUpdateQryOutTx = selForUpdate.getSQL();
                GridSqlAlias keyCol = keyColumn(selForUpdate);
                selForUpdate.addColumn(keyCol, true);
                // Remember sql string without FOR UPDATE clause and with _key column.
                forUpdateQryTx = selForUpdate.getSQL();
                // Prepare additional two-step query for FOR UPDATE case.
                if (splitNeeded) {
                    c.schema(newQry.getSchema());
                    forUpdateTwoStepQry = GridSqlQuerySplitter.split(c, selForUpdate, forUpdateQryTx, newQry.isCollocated(), newQry.isDistributedJoins(), newQry.isEnforceJoinOrder(), locSplit, idx, paramsCnt, log);
                }
            }
            GridCacheTwoStepQuery twoStepQry = null;
            if (splitNeeded) {
                GridSubqueryJoinOptimizer.pullOutSubQueries(selectStmt);
                c.schema(newQry.getSchema());
                twoStepQry = GridSqlQuerySplitter.split(c, selectStmt, newQry.getSql(), newQry.isCollocated(), newQry.isDistributedJoins(), newQry.isEnforceJoinOrder(), locSplit, idx, paramsCnt, log);
            }
            List<GridQueryFieldMetadata> meta = H2Utils.meta(stmt.getMetaData());
            QueryParserResultSelect select = new QueryParserResultSelect(selectStmt, twoStepQry, forUpdateTwoStepQry, meta, cacheIds, mvccCacheId, forUpdateQryOutTx, forUpdateQryTx);
            return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, select, null, null);
        } catch (IgniteCheckedException | SQLException e) {
            throw new IgniteSQLException("Failed to parse query. " + e.getMessage(), IgniteQueryErrorCode.PARSING, e);
        } finally {
            U.close(stmt, log);
        }
    }
}
Also used: GridSqlAlias(org.apache.ignite.internal.processors.query.h2.sql.GridSqlAlias) SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridSqlStatement(org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement) Prepared(org.h2.command.Prepared) GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) GridQueryFieldMetadata(org.apache.ignite.internal.processors.query.GridQueryFieldMetadata) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) JdbcParameterMeta(org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta) PreparedStatement(java.sql.PreparedStatement) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) GridSqlSelect(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect) SqlFieldsQuery(org.apache.ignite.cache.query.SqlFieldsQuery) GridSqlQuery(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery) GridSqlQueryParser(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser)
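
The sketch below shows the caller-side preconditions parseH2 enforces for SELECT FOR UPDATE: exactly one cache involved, and that cache must be transactional with MVCC enabled. It assumes an Ignite version where CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT is available; the cache name and data are made up.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

public class SelectForUpdateUsage {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(
                new CacheConfiguration<Integer, String>("accounts")
                    .setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT) // MVCC required
                    .setIndexedTypes(Integer.class, String.class));

            cache.put(1, "alice");

            try (Transaction tx = ignite.transactions().txStart()) {
                // parseH2() rewrites this into a plain SELECT with the key column appended,
                // so the selected rows can be locked within the transaction.
                cache.query(new SqlFieldsQuery(
                    "SELECT _val FROM String WHERE _key = ? FOR UPDATE").setArgs(1)).getAll();

                tx.commit();
            }
        }
    }
}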

Aggregations

GridCacheTwoStepQuery (org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery): 15 usages
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 8 usages
Prepared (org.h2.command.Prepared): 7 usages
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 6 usages
PreparedStatement (java.sql.PreparedStatement): 5 usages
SQLException (java.sql.SQLException): 5 usages
IgniteSystemProperties.getString (org.apache.ignite.IgniteSystemProperties.getString): 4 usages
Connection (java.sql.Connection): 3 usages
LinkedHashSet (java.util.LinkedHashSet): 3 usages
GridCacheSqlQuery (org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery): 3 usages
GridQueryFieldMetadata (org.apache.ignite.internal.processors.query.GridQueryFieldMetadata): 3 usages
GridSqlQuery (org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery): 3 usages
GridSqlQueryParser (org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser): 3 usages
Query (org.h2.command.dml.Query): 3 usages
ArrayList (java.util.ArrayList): 2 usages
HashSet (java.util.HashSet): 2 usages
List (java.util.List): 2 usages
Map (java.util.Map): 2 usages
UUID (java.util.UUID): 2 usages
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 2 usages