
Example 1 with H2PooledConnection

Use of org.apache.ignite.internal.processors.query.h2.H2PooledConnection in project ignite by apache.

From the class GridReduceQueryExecutor, method createMergeTable.

/**
 * @param conn H2 pooled connection.
 * @param qry Map query to build a merge table for.
 * @param explain Explain flag.
 * @return Table.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private ReduceTable createMergeTable(H2PooledConnection conn, GridCacheSqlQuery qry, boolean explain) throws IgniteCheckedException {
    try {
        Session ses = H2Utils.session(conn);
        CreateTableData data = new CreateTableData();
        data.tableName = "T___";
        data.schema = ses.getDatabase().getSchema(ses.getCurrentSchemaName());
        data.create = true;
        if (!explain) {
            LinkedHashMap<String, ?> colsMap = qry.columns();
            assert colsMap != null;
            ArrayList<Column> cols = new ArrayList<>(colsMap.size());
            for (Map.Entry<String, ?> e : colsMap.entrySet()) {
                String alias = e.getKey();
                GridSqlType type = (GridSqlType) e.getValue();
                assert !F.isEmpty(alias);
                Column col0;
                if (type == GridSqlType.UNKNOWN) {
                    // Special case for parameter being set at the top of the query (e.g. SELECT ? FROM ...).
                    // Re-map it to STRING in the same way it is done in H2, because any argument can be cast
                    // to string.
                    col0 = new Column(alias, Value.STRING);
                } else {
                    col0 = new Column(alias, type.type(), type.precision(), type.scale(), type.displaySize());
                }
                cols.add(col0);
            }
            data.columns = cols;
        } else
            data.columns = planColumns();
        boolean sortedIndex = !F.isEmpty(qry.sortColumns());
        ReduceTable tbl = new ReduceTable(data);
        ArrayList<Index> idxs = new ArrayList<>(2);
        if (explain) {
            idxs.add(new UnsortedReduceIndexAdapter(ctx, tbl, sortedIndex ? MERGE_INDEX_SORTED : MERGE_INDEX_UNSORTED));
        } else if (sortedIndex) {
            List<GridSqlSortColumn> sortCols = (List<GridSqlSortColumn>) qry.sortColumns();
            SortedReduceIndexAdapter sortedMergeIdx = new SortedReduceIndexAdapter(ctx, tbl, MERGE_INDEX_SORTED, GridSqlSortColumn.toIndexColumns(tbl, sortCols));
            idxs.add(ReduceTable.createScanIndex(sortedMergeIdx));
            idxs.add(sortedMergeIdx);
        } else
            idxs.add(new UnsortedReduceIndexAdapter(ctx, tbl, MERGE_INDEX_UNSORTED));
        tbl.indexes(idxs);
        return tbl;
    } catch (Exception e) {
        throw new IgniteCheckedException(e);
    }
}
Also used : ArrayList(java.util.ArrayList) Index(org.h2.index.Index) CreateTableData(org.h2.command.ddl.CreateTableData) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) TransactionAlreadyCompletedException(org.apache.ignite.transactions.TransactionAlreadyCompletedException) IgniteTxAlreadyCompletedCheckedException(org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException) QueryRetryException(org.apache.ignite.cache.query.QueryRetryException) SQLException(java.sql.SQLException) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) CacheException(javax.cache.CacheException) TransactionException(org.apache.ignite.transactions.TransactionException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridSqlSortColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSortColumn) Column(org.h2.table.Column) GridSqlSortColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSortColumn) GridSqlType(org.apache.ignite.internal.processors.query.h2.sql.GridSqlType) Collections.singletonList(java.util.Collections.singletonList) List(java.util.List) ArrayList(java.util.ArrayList) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) LinkedHashMap(java.util.LinkedHashMap) Collections.singletonMap(java.util.Collections.singletonMap) Session(org.h2.engine.Session)
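
The only non-obvious step in the loop above is the mapping from GridSqlType to an H2 Column, including the UNKNOWN-to-STRING fallback. A minimal sketch that isolates just that rule (toH2Column is a hypothetical helper name used here for illustration, not part of Ignite):

import org.apache.ignite.internal.processors.query.h2.sql.GridSqlType;
import org.h2.table.Column;
import org.h2.value.Value;

/** Hypothetical helper that isolates the column-mapping rule from createMergeTable. */
static Column toH2Column(String alias, GridSqlType type) {
    // A parameter placed directly in the SELECT list (e.g. SELECT ? FROM ...) arrives as UNKNOWN
    // and is re-mapped to STRING, mirroring H2, because any argument can be cast to a string.
    if (type == GridSqlType.UNKNOWN)
        return new Column(alias, Value.STRING);

    // Otherwise carry over the type, precision, scale and display size produced by the splitter.
    return new Column(alias, type.type(), type.precision(), type.scale(), type.displaySize());
}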

Example 2 with H2PooledConnection

Use of org.apache.ignite.internal.processors.query.h2.H2PooledConnection in project ignite by apache.

From the class GridReduceQueryExecutor, method createReduceQueryRun.

/**
 * Query run factory method.
 *
 * @param conn H2 connection.
 * @param mapQueries Map queries.
 * @param nodes Target nodes.
 * @param pageSize Page size.
 * @param nodeToSegmentsCnt Number of index segments per node.
 * @param skipMergeTbl Skip merge table flag.
 * @param explain Explain query flag.
 * @param dataPageScanEnabled DataPage scan enabled flag.
 * @return Reduce query run.
 */
@NotNull
private ReduceQueryRun createReduceQueryRun(H2PooledConnection conn, List<GridCacheSqlQuery> mapQueries, Collection<ClusterNode> nodes, int pageSize, Map<ClusterNode, Integer> nodeToSegmentsCnt, boolean skipMergeTbl, boolean explain, Boolean dataPageScanEnabled) {
    final ReduceQueryRun r = new ReduceQueryRun(mapQueries.size(), pageSize, dataPageScanEnabled);
    int tblIdx = 0;
    int replicatedQrysCnt = 0;
    for (GridCacheSqlQuery mapQry : mapQueries) {
        Reducer reducer;
        if (skipMergeTbl)
            reducer = UnsortedOneWayReducer.createDummy(ctx);
        else {
            ReduceTable tbl;
            try {
                tbl = createMergeTable(conn, mapQry, explain);
            } catch (IgniteCheckedException e) {
                throw new IgniteException(e);
            }
            reducer = tbl.getReducer();
            fakeTable(conn, tblIdx++).innerTable(tbl);
        }
        // If the query has only replicated tables, we have to run it on a single node only.
        if (!mapQry.isPartitioned()) {
            ClusterNode node = F.rand(nodes);
            mapQry.node(node.id());
            replicatedQrysCnt++;
            // Replicated tables can have only 1 segment.
            reducer.setSources(singletonMap(node, 1));
        } else
            reducer.setSources(nodeToSegmentsCnt);
        reducer.setPageSize(r.pageSize());
        r.reducers().add(reducer);
    }
    int cnt = nodeToSegmentsCnt.values().stream().mapToInt(i -> i).sum();
    r.init((r.reducers().size() - replicatedQrysCnt) * cnt + replicatedQrysCnt);
    return r;
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) SQL_DML_QRY_RESP(org.apache.ignite.internal.processors.tracing.SpanType.SQL_DML_QRY_RESP) GridQueryCancel(org.apache.ignite.internal.processors.query.GridQueryCancel) QueryUtils(org.apache.ignite.internal.processors.query.QueryUtils) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException) IGNITE_SQL_RETRY_TIMEOUT(org.apache.ignite.IgniteSystemProperties.IGNITE_SQL_RETRY_TIMEOUT) SQL_FAIL_RESP(org.apache.ignite.internal.processors.tracing.SpanType.SQL_FAIL_RESP) H2Utils(org.apache.ignite.internal.processors.query.h2.H2Utils) Index(org.h2.index.Index) IgniteSystemProperties(org.apache.ignite.IgniteSystemProperties) ReduceH2QueryInfo(org.apache.ignite.internal.processors.query.h2.ReduceH2QueryInfo) Collections.singletonList(java.util.Collections.singletonList) ResultSet(java.sql.ResultSet) Map(java.util.Map) GridIoPolicy(org.apache.ignite.internal.managers.communication.GridIoPolicy) GridQueryFailResponse(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryFailResponse) GridH2DmlResponse(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlResponse) GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) H2PooledConnection(org.apache.ignite.internal.processors.query.h2.H2PooledConnection) IgniteFuture(org.apache.ignite.lang.IgniteFuture) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) CIX2(org.apache.ignite.internal.util.typedef.CIX2) Collection(java.util.Collection) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) UpdateResult(org.apache.ignite.internal.processors.query.h2.UpdateResult) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) TransactionAlreadyCompletedException(org.apache.ignite.transactions.TransactionAlreadyCompletedException) UUID(java.util.UUID) IgniteTxAlreadyCompletedCheckedException(org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException) EMPTY_PARAMS(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery.EMPTY_PARAMS) PreparedStatement(java.sql.PreparedStatement) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) Nullable(org.jetbrains.annotations.Nullable) ERROR(org.apache.ignite.internal.processors.tracing.SpanTags.ERROR) List(java.util.List) GridQueryCacheObjectsIterator(org.apache.ignite.internal.processors.query.GridQueryCacheObjectsIterator) MTC(org.apache.ignite.internal.processors.tracing.MTC) InlineIndexImpl(org.apache.ignite.internal.cache.query.index.sorted.inline.InlineIndexImpl) CreateTableData(org.h2.command.ddl.CreateTableData) Message(org.apache.ignite.plugin.extensions.communication.Message) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) NotNull(org.jetbrains.annotations.NotNull) GridQueryNextPageRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageRequest) GridSqlSortColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSortColumn) DiscoveryEvent(org.apache.ignite.events.DiscoveryEvent) GridH2QueryRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2QueryRequest) MvccUtils.tx(org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.tx) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) U(org.apache.ignite.internal.util.typedef.internal.U) 
HashMap(java.util.HashMap) IgniteLogger(org.apache.ignite.IgniteLogger) QueryRetryException(org.apache.ignite.cache.query.QueryRetryException) IgniteBiClosure(org.apache.ignite.lang.IgniteBiClosure) ArrayList(java.util.ArrayList) ConcurrentMap(java.util.concurrent.ConcurrentMap) GridKernalContext(org.apache.ignite.internal.GridKernalContext) SQL_PAGE_RESP(org.apache.ignite.internal.processors.tracing.SpanType.SQL_PAGE_RESP) LinkedHashMap(java.util.LinkedHashMap) Column(org.h2.table.Column) SQLException(java.sql.SQLException) Session(org.h2.engine.Session) ClusterNode(org.apache.ignite.cluster.ClusterNode) IntArray(org.h2.util.IntArray) H2FieldsIterator(org.apache.ignite.internal.processors.query.h2.H2FieldsIterator) IgniteH2Indexing(org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) CacheException(javax.cache.CacheException) TransactionException(org.apache.ignite.transactions.TransactionException) Collections.singletonMap(java.util.Collections.singletonMap) C2(org.apache.ignite.internal.util.typedef.C2) Value(org.h2.value.Value) GridSqlQuerySplitter.mergeTableIdentifier(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuerySplitter.mergeTableIdentifier) F(org.apache.ignite.internal.util.typedef.F) Query(org.apache.ignite.cache.query.Query) Iterator(java.util.Iterator) ReentrantLock(java.util.concurrent.locks.ReentrantLock) DmlDistributedUpdateRun(org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedUpdateRun) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridSqlType(org.apache.ignite.internal.processors.query.h2.sql.GridSqlType) TimeUnit(java.util.concurrent.TimeUnit) GridQueryNextPageResponse(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageResponse) AtomicLong(java.util.concurrent.atomic.AtomicLong) Lock(java.util.concurrent.locks.Lock) GridTopic(org.apache.ignite.internal.GridTopic) BitSet(java.util.BitSet) MvccUtils.checkActive(org.apache.ignite.internal.processors.cache.mvcc.MvccUtils.checkActive) Collections(java.util.Collections) MvccQueryTracker(org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) TraceSurroundings(org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) NotNull(org.jetbrains.annotations.NotNull)
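
The r.init(...) call at the end sizes the run by the number of map result streams the reducers will wait for. A worked example with made-up numbers, assuming three map queries, one of which touches only replicated tables, and four index segments in total across the target nodes:

// Hypothetical numbers, for illustration only.
int reducersCnt = 3;       // r.reducers().size(): one reducer per map query.
int replicatedQrysCnt = 1; // Map queries pinned to a single node (replicated tables only).
int totalSegments = 4;     // Sum of the nodeToSegmentsCnt values across all target nodes.

// Each partitioned map query yields one result stream per segment, while each replicated
// map query yields exactly one stream on its single target node.
int expectedStreams = (reducersCnt - replicatedQrysCnt) * totalSegments + replicatedQrysCnt;
// expectedStreams == (3 - 1) * 4 + 1 == 9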

Example 3 with H2PooledConnection

Use of org.apache.ignite.internal.processors.query.h2.H2PooledConnection in project ignite by apache.

From the class GridMapQueryExecutor, method onQueryRequest0.

/**
 * @param node Node that authored the request.
 * @param qryId Query ID.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoins Query distributed join mode.
 * @param enforceJoinOrder Enforce join order H2 flag.
 * @param replicated Replicated only flag.
 * @param timeout Query timeout.
 * @param params Query parameters.
 * @param lazy Lazy execution flag.
 * @param mvccSnapshot MVCC snapshot.
 * @param dataPageScanEnabled If data page scan is enabled.
 */
private void onQueryRequest0(final ClusterNode node, final long qryId, final long reqId, final int segmentId, final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds, final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts, final int pageSize, final boolean distributedJoins, final boolean enforceJoinOrder, final boolean replicated, final int timeout, final Object[] params, boolean lazy, @Nullable final MvccSnapshot mvccSnapshot, Boolean dataPageScanEnabled, boolean treatReplicatedAsPartitioned) {
    boolean performanceStatsEnabled = ctx.performanceStatistics().enabled();
    if (performanceStatsEnabled)
        IoStatisticsQueryHelper.startGatheringQueryStatistics();
    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx = mainCacheContext(cacheIds);
    MapNodeResults nodeRess = resultsForNode(node.id());
    MapQueryResults qryResults = null;
    PartitionReservation reserved = null;
    QueryContext qctx = null;
    // We don't use try-with-resources on purpose: the catch block must also be executed in the context of this span.
    TraceSurroundings trace = MTC.support(ctx.tracing().create(SQL_QRY_EXEC_REQ, MTC.span()).addTag(SQL_QRY_TEXT, () -> qrys.stream().map(GridCacheSqlQuery::query).collect(Collectors.joining("; "))));
    try {
        if (topVer != null) {
            // Reserve primary for topology version or explicit partitions.
            reserved = h2.partitionReservationManager().reservePartitions(cacheIds, topVer, parts, node.id(), reqId);
            if (reserved.failed()) {
                sendRetry(node, reqId, segmentId, reserved.error());
                return;
            }
        }
        // Prepare query context.
        DistributedJoinContext distributedJoinCtx = null;
        if (distributedJoins && !replicated) {
            distributedJoinCtx = new DistributedJoinContext(topVer, partsMap, node.id(), reqId, segmentId, pageSize);
        }
        qctx = new QueryContext(segmentId, h2.backupFilter(topVer, parts, treatReplicatedAsPartitioned), distributedJoinCtx, mvccSnapshot, reserved, true);
        qryResults = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, lazy, qctx);
        // qctx is set, we have to release reservations inside of it.
        reserved = null;
        if (distributedJoinCtx != null)
            qryCtxRegistry.setShared(node.id(), reqId, qctx);
        if (nodeRess.put(reqId, segmentId, qryResults) != null)
            throw new IllegalStateException();
        if (nodeRess.cancelled(reqId)) {
            qryCtxRegistry.clearShared(node.id(), reqId);
            nodeRess.cancelRequest(reqId);
            throw new QueryCancelledException();
        }
        // Run queries.
        int qryIdx = 0;
        boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
        for (GridCacheSqlQuery qry : qrys) {
            H2PooledConnection conn = h2.connections().connection(schemaName);
            H2Utils.setupConnection(conn, qctx, distributedJoins, enforceJoinOrder, lazy);
            MapQueryResult res = new MapQueryResult(h2, mainCctx, node.id(), qry, params, conn, log);
            qryResults.addResult(qryIdx, res);
            try {
                res.lock();
                // Ensure we are on the target node for this replicated query.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    String sql = qry.query();
                    Collection<Object> params0 = F.asList(qry.parameters(params));
                    PreparedStatement stmt = conn.prepareStatement(sql, H2StatementCache.queryFlags(distributedJoins, enforceJoinOrder));
                    H2Utils.bindParameters(stmt, params0);
                    MapH2QueryInfo qryInfo = new MapH2QueryInfo(stmt, qry.query(), node.id(), qryId, reqId, segmentId);
                    ResultSet rs = h2.executeSqlQueryWithTimer(stmt, conn, sql, timeout, qryResults.queryCancel(qryIdx), dataPageScanEnabled, qryInfo);
                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null, qry.query(), null, null, params, node.id(), null));
                    }
                    assert rs instanceof JdbcResultSet : rs.getClass();
                    if (qryResults.cancelled()) {
                        rs.close();
                        throw new QueryCancelledException();
                    }
                    res.openResult(rs, qryInfo);
                    final GridQueryNextPageResponse msg = prepareNextPage(nodeRess, node, qryResults, qryIdx, segmentId, pageSize, dataPageScanEnabled);
                    if (msg != null)
                        sendNextPage(node, msg);
                } else {
                    assert !qry.isPartitioned();
                    qryResults.closeResult(qryIdx);
                }
                qryIdx++;
            } finally {
                try {
                    res.unlockTables();
                } finally {
                    res.unlock();
                }
            }
        }
        if (!lazy)
            qryResults.releaseQueryContext();
    } catch (Throwable e) {
        if (qryResults != null) {
            nodeRess.remove(reqId, segmentId, qryResults);
            qryResults.close();
            // If a query is cancelled before execution is started, partitions have to be released.
            if (!lazy || !qryResults.isAllClosed())
                qryResults.releaseQueryContext();
        } else
            releaseReservations(qctx);
        if (e instanceof QueryCancelledException)
            sendError(node, reqId, e);
        else {
            SQLException sqlEx = X.cause(e, SQLException.class);
            if (sqlEx != null && sqlEx.getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED)
                sendQueryCancel(node, reqId);
            else {
                GridH2RetryException retryErr = X.cause(e, GridH2RetryException.class);
                if (retryErr != null) {
                    final String retryCause = String.format("Failed to execute non-collocated query (will retry) [localNodeId=%s, rmtNodeId=%s, reqId=%s, " + "errMsg=%s]", ctx.localNodeId(), node.id(), reqId, retryErr.getMessage());
                    sendRetry(node, reqId, segmentId, retryCause);
                } else {
                    QueryRetryException qryRetryErr = X.cause(e, QueryRetryException.class);
                    if (qryRetryErr != null)
                        sendError(node, reqId, qryRetryErr);
                    else {
                        if (e instanceof Error) {
                            U.error(log, "Failed to execute local query.", e);
                            throw (Error) e;
                        }
                        U.warn(log, "Failed to execute local query.", e);
                        sendError(node, reqId, e);
                    }
                }
            }
        }
    } finally {
        if (reserved != null)
            reserved.release();
        if (trace != null)
            trace.close();
        if (performanceStatsEnabled) {
            IoStatisticsHolder stat = IoStatisticsQueryHelper.finishGatheringQueryStatistics();
            if (stat.logicalReads() > 0 || stat.physicalReads() > 0) {
                ctx.performanceStatistics().queryReads(GridCacheQueryType.SQL_FIELDS, node.id(), reqId, stat.logicalReads(), stat.physicalReads());
            }
        }
    }
}
Also used : QueryRetryException(org.apache.ignite.cache.query.QueryRetryException) SQLException(java.sql.SQLException) GridQueryNextPageResponse(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageResponse) TraceSurroundings(org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings) DistributedJoinContext(org.apache.ignite.internal.processors.query.h2.opt.join.DistributedJoinContext) ResultSet(java.sql.ResultSet) JdbcResultSet(org.h2.jdbc.JdbcResultSet) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) JdbcResultSet(org.h2.jdbc.JdbcResultSet) H2PooledConnection(org.apache.ignite.internal.processors.query.h2.H2PooledConnection) MapH2QueryInfo(org.apache.ignite.internal.processors.query.h2.MapH2QueryInfo) GridH2RetryException(org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException) PreparedStatement(java.sql.PreparedStatement) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) IoStatisticsHolder(org.apache.ignite.internal.metric.IoStatisticsHolder) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException)
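
The connection handling inside the query loop follows a fixed sequence: borrow a pooled connection for the request's schema, configure it for the current query context, then prepare and bind the statement before execution. A condensed sketch of that sequence, assuming the same local variables (h2, schemaName, qctx, distributedJoins, enforceJoinOrder, lazy, sql, params0) as in the example above:

// Borrow a pooled connection bound to the request's schema.
H2PooledConnection conn = h2.connections().connection(schemaName);

// Attach the query context and session flags (distributed joins, join order, laziness).
H2Utils.setupConnection(conn, qctx, distributedJoins, enforceJoinOrder, lazy);

// Prepare the map query (possibly served from the statement cache) and bind its parameters.
PreparedStatement stmt = conn.prepareStatement(sql,
    H2StatementCache.queryFlags(distributedJoins, enforceJoinOrder));

H2Utils.bindParameters(stmt, params0);

// From here the statement is executed via h2.executeSqlQueryWithTimer(...), and the connection's
// further lifecycle is owned by the MapQueryResult it was handed to.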

Example 4 with H2PooledConnection

Use of org.apache.ignite.internal.processors.query.h2.H2PooledConnection in project ignite by apache.

From the class QueryParser, method parseH2.

/**
 * Parse the query and split it if needed; cache either the two-step query or the statement.
 *
 * @param schemaName Schema name.
 * @param qry Query.
 * @param batched Batched flag.
 * @param remainingAllowed Whether multiple statements are allowed.
 * @return Parsing result.
 */
@SuppressWarnings("IfMayBeConditional")
private QueryParserResult parseH2(String schemaName, SqlFieldsQuery qry, boolean batched, boolean remainingAllowed) {
    try (H2PooledConnection c = connMgr.connection(schemaName)) {
        // For queries that are explicitly local, we rely on the flag specified in the query,
        // because this parsing result will be cached and used for such queries directly.
        // For other queries, we enforce join order at this stage to avoid premature optimizations
        // (and therefore longer parsing), since there will be more parsing at the split stage.
        boolean enforceJoinOrderOnParsing = (!qry.isLocal() || qry.isEnforceJoinOrder());
        QueryContext qctx = QueryContext.parseContext(idx.backupFilter(null, null), qry.isLocal());
        H2Utils.setupConnection(c, qctx, false, enforceJoinOrderOnParsing, false);
        PreparedStatement stmt = null;
        try {
            stmt = c.prepareStatementNoCache(qry.getSql());
            if (qry.isLocal() && GridSqlQueryParser.checkMultipleStatements(stmt))
                throw new IgniteSQLException("Multiple statements queries are not supported for local queries.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt);
            Prepared prepared = prep.prepared();
            if (GridSqlQueryParser.isExplainUpdate(prepared))
                throw new IgniteSQLException("Explains of update queries are not supported.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            // Get remaining query and check if it is allowed.
            SqlFieldsQuery remainingQry = null;
            if (!F.isEmpty(prep.remainingSql())) {
                checkRemainingAllowed(remainingAllowed);
                remainingQry = cloneFieldsQuery(qry).setSql(prep.remainingSql());
            }
            // Prepare new query.
            SqlFieldsQuery newQry = cloneFieldsQuery(qry).setSql(prepared.getSQL());
            final int paramsCnt = prepared.getParameters().size();
            Object[] argsOrig = qry.getArgs();
            Object[] args = null;
            Object[] remainingArgs = null;
            if (!batched && paramsCnt > 0) {
                if (argsOrig == null || argsOrig.length < paramsCnt)
                    // Not enough parameters, but we will handle this later, at the execution phase.
                    args = argsOrig;
                else {
                    args = Arrays.copyOfRange(argsOrig, 0, paramsCnt);
                    if (paramsCnt != argsOrig.length)
                        remainingArgs = Arrays.copyOfRange(argsOrig, paramsCnt, argsOrig.length);
                }
            } else
                remainingArgs = argsOrig;
            newQry.setArgs(args);
            QueryDescriptor newQryDesc = queryDescriptor(schemaName, newQry);
            if (remainingQry != null)
                remainingQry.setArgs(remainingArgs);
            final List<JdbcParameterMeta> paramsMeta;
            try {
                paramsMeta = H2Utils.parametersMeta(stmt.getParameterMetaData());
                assert prepared.getParameters().size() == paramsMeta.size();
            } catch (IgniteCheckedException | SQLException e) {
                throw new IgniteSQLException("Failed to get parameters metadata", IgniteQueryErrorCode.UNKNOWN, e);
            }
            // Do actual parsing.
            if (CommandProcessor.isCommand(prepared)) {
                GridSqlStatement cmdH2 = new GridSqlQueryParser(false, log).parse(prepared);
                QueryParserResultCommand cmd = new QueryParserResultCommand(null, cmdH2, false);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, null, cmd);
            } else if (CommandProcessor.isCommandNoOp(prepared)) {
                QueryParserResultCommand cmd = new QueryParserResultCommand(null, null, true);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, null, cmd);
            } else if (GridSqlQueryParser.isDml(prepared)) {
                QueryParserResultDml dml = prepareDmlStatement(newQryDesc, prepared);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, dml, null);
            } else if (!prepared.isQuery()) {
                throw new IgniteSQLException("Unsupported statement: " + newQry.getSql(), IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            }
            // Parse SELECT.
            GridSqlQueryParser parser = new GridSqlQueryParser(false, log);
            GridSqlQuery selectStmt = (GridSqlQuery) parser.parse(prepared);
            List<Integer> cacheIds = parser.cacheIds();
            Integer mvccCacheId = mvccCacheIdForSelect(parser.objectsMap());
            // Calculate whether the query can in fact be executed locally.
            boolean loc = qry.isLocal();
            if (!loc) {
                if (parser.isLocalQuery())
                    loc = true;
            }
            // If this is a local query, check if it must be split.
            boolean locSplit = false;
            if (loc) {
                GridCacheContext cctx = parser.getFirstPartitionedCache();
                if (cctx != null && cctx.config().getQueryParallelism() > 1)
                    locSplit = true;
            }
            // Split is required either when the query is distributed, or when it is local but executed
            // over a segmented PARTITIONED cache. In that case multiple map queries are executed against
            // local node stripes in parallel and then merged through the reduce process.
            boolean splitNeeded = !loc || locSplit;
            String forUpdateQryOutTx = null;
            String forUpdateQryTx = null;
            GridCacheTwoStepQuery forUpdateTwoStepQry = null;
            boolean forUpdate = GridSqlQueryParser.isForUpdateQuery(prepared);
            // For FOR UPDATE queries we will add the _key column to be able to lock selected rows further.
            if (forUpdate) {
                // We have checked above that it's not a UNION query, so it has to be a SELECT.
                assert selectStmt instanceof GridSqlSelect;
                // Check FOR UPDATE invariants: only one cache is involved and MVCC is enabled.
                if (cacheIds.size() != 1)
                    throw new IgniteSQLException("SELECT FOR UPDATE is supported only for queries " + "that involve single transactional cache.");
                if (mvccCacheId == null)
                    throw new IgniteSQLException("SELECT FOR UPDATE query requires transactional cache " + "with MVCC enabled.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                // We need a copy because we are going to modify AST a bit. We do not want to modify original select.
                GridSqlSelect selForUpdate = ((GridSqlSelect) selectStmt).copySelectForUpdate();
                // Clear forUpdate flag to run it as a plain query.
                selForUpdate.forUpdate(false);
                ((GridSqlSelect) selectStmt).forUpdate(false);
                // Remember sql string without FOR UPDATE clause.
                forUpdateQryOutTx = selForUpdate.getSQL();
                GridSqlAlias keyCol = keyColumn(selForUpdate);
                selForUpdate.addColumn(keyCol, true);
                // Remember sql string without FOR UPDATE clause and with _key column.
                forUpdateQryTx = selForUpdate.getSQL();
                // Prepare additional two-step query for FOR UPDATE case.
                if (splitNeeded) {
                    c.schema(newQry.getSchema());
                    forUpdateTwoStepQry = GridSqlQuerySplitter.split(c, selForUpdate, forUpdateQryTx, newQry.isCollocated(), newQry.isDistributedJoins(), newQry.isEnforceJoinOrder(), locSplit, idx, paramsCnt, log);
                }
            }
            GridCacheTwoStepQuery twoStepQry = null;
            if (splitNeeded) {
                GridSubqueryJoinOptimizer.pullOutSubQueries(selectStmt);
                c.schema(newQry.getSchema());
                twoStepQry = GridSqlQuerySplitter.split(c, selectStmt, newQry.getSql(), newQry.isCollocated(), newQry.isDistributedJoins(), newQry.isEnforceJoinOrder(), locSplit, idx, paramsCnt, log);
            }
            List<GridQueryFieldMetadata> meta = H2Utils.meta(stmt.getMetaData());
            QueryParserResultSelect select = new QueryParserResultSelect(selectStmt, twoStepQry, forUpdateTwoStepQry, meta, cacheIds, mvccCacheId, forUpdateQryOutTx, forUpdateQryTx);
            return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, select, null, null);
        } catch (IgniteCheckedException | SQLException e) {
            throw new IgniteSQLException("Failed to parse query. " + e.getMessage(), IgniteQueryErrorCode.PARSING, e);
        } finally {
            U.close(stmt, log);
        }
    }
}
Also used : GridSqlAlias(org.apache.ignite.internal.processors.query.h2.sql.GridSqlAlias) SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridSqlStatement(org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement) Prepared(org.h2.command.Prepared) GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) GridQueryFieldMetadata(org.apache.ignite.internal.processors.query.GridQueryFieldMetadata) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) JdbcParameterMeta(org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta) PreparedStatement(java.sql.PreparedStatement) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) GridSqlSelect(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect) SqlFieldsQuery(org.apache.ignite.cache.query.SqlFieldsQuery) GridSqlQuery(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery) GridSqlQueryParser(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException)
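
For parsing, the pooled connection is scoped with try-with-resources so it is returned to the pool as soon as the statement has been inspected. A minimal sketch of that scaffolding, assuming the same connMgr, idx, qry, schemaName and log fields as in the example; the parsing logic itself is elided:

try (H2PooledConnection c = connMgr.connection(schemaName)) {
    // A parse-only context: no distributed joins, no lazy execution.
    QueryContext qctx = QueryContext.parseContext(idx.backupFilter(null, null), qry.isLocal());

    boolean enforceJoinOrderOnParsing = !qry.isLocal() || qry.isEnforceJoinOrder();

    H2Utils.setupConnection(c, qctx, false, enforceJoinOrderOnParsing, false);

    // Bypass the statement cache: the parsing result, not the statement, is what gets cached.
    PreparedStatement stmt = c.prepareStatementNoCache(qry.getSql());

    try {
        // ... inspect the Prepared command and build the QueryParserResult as shown above ...
    }
    finally {
        U.close(stmt, log);
    }
}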

Example 5 with H2PooledConnection

Use of org.apache.ignite.internal.processors.query.h2.H2PooledConnection in project ignite by apache.

From the class SchemaManager, method onCacheTypeCreated.

/**
 * Registers a new class description.
 *
 * @param cacheInfo Cache info.
 * @param idx Indexing.
 * @param type Type descriptor.
 * @param isSql Whether SQL is enabled.
 * @throws IgniteCheckedException If failed.
 */
public void onCacheTypeCreated(GridCacheContextInfo cacheInfo, IgniteH2Indexing idx, GridQueryTypeDescriptor type, boolean isSql) throws IgniteCheckedException {
    String schemaName = schemaName(cacheInfo.name());
    H2TableDescriptor tblDesc = new H2TableDescriptor(idx, schemaName, type, cacheInfo, isSql);
    H2Schema schema = schema(schemaName);
    try (H2PooledConnection conn = connMgr.connection(schema.schemaName())) {
        GridH2Table h2tbl = createTable(schema.schemaName(), schema, tblDesc, conn);
        schema.add(tblDesc);
        if (dataTables.putIfAbsent(h2tbl.identifier(), h2tbl) != null)
            throw new IllegalStateException("Table already exists: " + h2tbl.identifierString());
    } catch (SQLException e) {
        throw new IgniteCheckedException("Failed to register query type: " + tblDesc, e);
    }
}
Also used : IgniteCheckedException(org.apache.ignite.IgniteCheckedException) SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table)
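
Taken together, the examples share one contract: an H2PooledConnection is borrowed from the connection manager for a schema, optionally re-pointed at another schema or unwrapped to the underlying H2 session, and returned to the pool on close. A distilled sketch of that pattern (connMgr, schemaName and otherSchemaName stand for whatever the surrounding component holds):

try (H2PooledConnection conn = connMgr.connection(schemaName)) {
    // Switch the connection to another schema if needed (see Example 4).
    conn.schema(otherSchemaName);

    // Unwrap the underlying H2 session when low-level access is required (see Example 1).
    Session ses = H2Utils.session(conn);

    // ... prepare statements or run DDL against 'conn'; SQL errors are typically translated
    // into IgniteCheckedException by the caller (see Example 5) ...
}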

Aggregations

SQLException (java.sql.SQLException): 7
IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 7
PreparedStatement (java.sql.PreparedStatement): 6
QueryContext (org.apache.ignite.internal.processors.query.h2.opt.QueryContext): 6
QueryCancelledException (org.apache.ignite.cache.query.QueryCancelledException): 5
H2PooledConnection (org.apache.ignite.internal.processors.query.h2.H2PooledConnection): 5
ResultSet (java.sql.ResultSet): 4
ArrayList (java.util.ArrayList): 4
Collections.singletonList (java.util.Collections.singletonList): 4
List (java.util.List): 4
CacheException (javax.cache.CacheException): 4
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 4
Session (org.h2.engine.Session): 4
IgniteClientDisconnectedException (org.apache.ignite.IgniteClientDisconnectedException): 3
IgniteException (org.apache.ignite.IgniteException): 3
QueryRetryException (org.apache.ignite.cache.query.QueryRetryException): 3
GridCacheSqlQuery (org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery): 3
GridCacheTwoStepQuery (org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery): 3
TraceSurroundings (org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings): 3
IgniteTxAlreadyCompletedCheckedException (org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException): 3