
Example 21 with AND

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND in project ignite by apache.

The class IgniteStatisticsConfigurationManager, method updateStatistics.

/**
 * Update local statistics for the specified database objects on the cluster.
 * Each node will scan its local primary partitions to collect and update local statistics.
 *
 * @param targets DB objects to update statistics for.
 */
public void updateStatistics(StatisticsObjectConfiguration... targets) {
    if (log.isDebugEnabled())
        log.debug("Update statistics [targets=" + targets + ']');
    for (StatisticsObjectConfiguration target : targets) {
        GridH2Table tbl = schemaMgr.dataTable(target.key().schema(), target.key().obj());
        validate(target, tbl);
        List<StatisticsColumnConfiguration> colCfgs;
        if (F.isEmpty(target.columns()))
            colCfgs = Arrays.stream(tbl.getColumns()).filter(c -> c.getColumnId() >= QueryUtils.DEFAULT_COLUMNS_COUNT).map(c -> new StatisticsColumnConfiguration(c.getName(), null)).collect(Collectors.toList());
        else
            colCfgs = new ArrayList<>(target.columns().values());
        StatisticsObjectConfiguration newCfg = new StatisticsObjectConfiguration(target.key(), colCfgs, target.maxPartitionObsolescencePercent());
        try {
            while (true) {
                String key = key2String(newCfg.key());
                StatisticsObjectConfiguration oldCfg = distrMetaStorage.read(key);
                StatisticsObjectConfiguration resultCfg = (oldCfg == null) ? newCfg : StatisticsObjectConfiguration.merge(oldCfg, newCfg);
                if (distrMetaStorage.compareAndSet(key, oldCfg, resultCfg))
                    break;
            }
        } catch (IgniteCheckedException ex) {
            throw new IgniteSQLException("Error on get or update statistic schema", IgniteQueryErrorCode.UNKNOWN, ex);
        }
    }
}
Also used: NodeStoppingException(org.apache.ignite.internal.NodeStoppingException) DiscoveryCustomEvent(org.apache.ignite.internal.events.DiscoveryCustomEvent) Arrays(java.util.Arrays) DiscoveryEvent(org.apache.ignite.events.DiscoveryEvent) QueryUtils(org.apache.ignite.internal.processors.query.QueryUtils) DistributedMetastorageLifecycleListener(org.apache.ignite.internal.processors.metastorage.DistributedMetastorageLifecycleListener) DistributedMetaStorage(org.apache.ignite.internal.processors.metastorage.DistributedMetaStorage) ClusterState(org.apache.ignite.cluster.ClusterState) IgniteLogger(org.apache.ignite.IgniteLogger) StatisticsColumnConfigurationViewWalker(org.apache.ignite.internal.managers.systemview.walker.StatisticsColumnConfigurationViewWalker) Function(java.util.function.Function) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) GridSystemViewManager(org.apache.ignite.internal.managers.systemview.GridSystemViewManager) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) DiscoveryCustomMessage(org.apache.ignite.internal.managers.discovery.DiscoveryCustomMessage) GridDhtPartitionsExchangeFuture(org.apache.ignite.internal.processors.cache.distributed.dht.preloader.GridDhtPartitionsExchangeFuture) StatisticsObjectConfiguration(org.apache.ignite.internal.processors.query.stat.config.StatisticsObjectConfiguration) X(org.apache.ignite.internal.util.typedef.X) BiConsumer(java.util.function.BiConsumer) IgniteThreadPoolExecutor(org.apache.ignite.thread.IgniteThreadPoolExecutor) StatisticsColumnConfiguration(org.apache.ignite.internal.processors.query.stat.config.StatisticsColumnConfiguration) SchemaManager(org.apache.ignite.internal.processors.query.h2.SchemaManager) F(org.apache.ignite.internal.util.typedef.F) Collection(java.util.Collection) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) ColumnConfigurationViewSupplier(org.apache.ignite.internal.processors.query.stat.view.ColumnConfigurationViewSupplier) Set(java.util.Set) IgniteQueryErrorCode(org.apache.ignite.internal.processors.cache.query.IgniteQueryErrorCode) Collectors(java.util.stream.Collectors) GridInternalSubscriptionProcessor(org.apache.ignite.internal.processors.subscription.GridInternalSubscriptionProcessor) List(java.util.List) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridClusterStateProcessor(org.apache.ignite.internal.processors.cluster.GridClusterStateProcessor) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) NotNull(org.jetbrains.annotations.NotNull) Collections(java.util.Collections) DynamicCacheChangeBatch(org.apache.ignite.internal.processors.cache.DynamicCacheChangeBatch)
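
For orientation, here is a minimal, hedged sketch of how this configuration update is normally triggered from the public API: since Ignite 2.11 the SQL ANALYZE command requests statistics collection, and that request is persisted through the distributed metastore much like updateStatistics() above. The table name PUBLIC.PERSON and the cache name statsSketch below are assumptions for illustration only.

import org.apache.ignite.Ignite;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class AnalyzeStatisticsSketch {
    public static void main(String[] args) {
        // Assumes a running cluster that already has a PUBLIC.PERSON SQL table (hypothetical name).
        try (Ignite ignite = Ignition.start()) {
            // ANALYZE asks the cluster to (re)collect local statistics for the given object.
            ignite.getOrCreateCache("statsSketch")
                .query(new SqlFieldsQuery("ANALYZE PUBLIC.PERSON"))
                .getAll();
        }
    }
}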

Example 22 with AND

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND in project ignite by apache.

The class IgniteStatisticsManagerImpl, method processObsolescence.

/**
 * Save dirty obsolescence info to the local metastore, check whether statistics need to be refreshed, and schedule the refresh.
 *
 * 1) Get all dirty partition statistics.
 * 2) Make a separate task for each key to avoid saving obsolescence info for a removed partition (race).
 * 3) Check whether each partition should be recollected and add it to the list in its table's task.
 * 4) Submit the tasks. The obsolescence info is actually stored during task processing.
 */
public synchronized void processObsolescence() {
    StatisticsUsageState usageState = usageState();
    if (usageState != ON || ctx.isStopping()) {
        if (log.isDebugEnabled())
            log.debug("Skipping obsolescence processing.");
        return;
    }
    if (log.isTraceEnabled())
        log.trace("Process statistics obsolescence started.");
    List<StatisticsKey> keys = statsRepos.getObsolescenceKeys();
    if (F.isEmpty(keys)) {
        if (log.isTraceEnabled())
            log.trace("No obsolescence info found. Finish obsolescence processing.");
        return;
    } else {
        if (log.isTraceEnabled())
            log.trace(String.format("Scheduling obsolescence savings for %d targets", keys.size()));
    }
    for (StatisticsKey key : keys) {
        StatisticsObjectConfiguration cfg = null;
        try {
            cfg = statCfgMgr.config(key);
        } catch (IgniteCheckedException e) {
            // No-op.
        }
        Set<Integer> tasksParts = calculateObsolescencedPartitions(cfg, statsRepos.getObsolescence(key));
        GridH2Table tbl = schemaMgr.dataTable(key.schema(), key.obj());
        if (tbl == null) {
            // The table could have been removed earlier but not processed yet, or something went wrong. Try to reschedule.
            if (log.isDebugEnabled())
                log.debug(String.format("Got obsolescence statistics for unknown table %s", key));
        }
        LocalStatisticsGatheringContext ctx = new LocalStatisticsGatheringContext(true, tbl, cfg, tasksParts, null);
        statProc.updateLocalStatistics(ctx);
    }
}
Also used: IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) StatisticsObjectConfiguration(org.apache.ignite.internal.processors.query.stat.config.StatisticsObjectConfiguration)
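
The decision to recollect is driven by per-partition obsolescence counters compared against maxPartitionObsolescencePercent. The helper below is not Ignite API; it is a standalone sketch of that threshold check with assumed inputs (changed and total row counts per partition).

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/** Standalone sketch (not Ignite API) of the obsolescence threshold check. */
public class ObsolescenceSketch {
    /** @return partitions whose changed-rows ratio exceeds {@code maxObsolescencePercent}. */
    static Set<Integer> partitionsToRecollect(
        Map<Integer, Long> changedRowsByPart,
        Map<Integer, Long> totalRowsByPart,
        double maxObsolescencePercent
    ) {
        Set<Integer> res = new HashSet<>();

        for (Map.Entry<Integer, Long> e : changedRowsByPart.entrySet()) {
            long total = totalRowsByPart.getOrDefault(e.getKey(), 0L);

            if (total > 0 && 100.0 * e.getValue() / total > maxObsolescencePercent)
                res.add(e.getKey());
        }

        return res;
    }

    public static void main(String[] args) {
        Map<Integer, Long> changed = new HashMap<>();
        changed.put(0, 5L);  // 5 of 100 rows changed -> 5%
        changed.put(1, 40L); // 40 of 100 rows changed -> 40%

        Map<Integer, Long> total = new HashMap<>();
        total.put(0, 100L);
        total.put(1, 100L);

        // With a 15% threshold only partition 1 is scheduled for recollection: prints [1].
        System.out.println(partitionsToRecollect(changed, total, 15.0));
    }
}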

Example 23 with AND

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND in project ignite by apache.

The class ValidateIndexesClosure, method processIndex.

/**
 * Iterates over the given index and verifies that every indexed key can be read back from the cache data store.
 *
 * @param cacheCtxWithIdx Cache context and appropriate index.
 * @param idleChecker Idle check closure.
 * @return Mapping of the unique index name to its validation result.
 */
private Map<String, ValidateIndexesPartitionResult> processIndex(T2<GridCacheContext, Index> cacheCtxWithIdx, IgniteInClosure<Integer> idleChecker) {
    if (validateCtx.isCancelled())
        return emptyMap();
    GridCacheContext ctx = cacheCtxWithIdx.get1();
    Index idx = cacheCtxWithIdx.get2();
    ValidateIndexesPartitionResult idxValidationRes = new ValidateIndexesPartitionResult();
    boolean enoughIssues = false;
    Cursor cursor = null;
    try (Session session = mvccSession(cacheCtxWithIdx.get1())) {
        cursor = idx.find(session, null, null);
        if (cursor == null)
            throw new IgniteCheckedException("Can't iterate through index: " + idx);
    } catch (Throwable t) {
        IndexValidationIssue is = new IndexValidationIssue(null, ctx.name(), idx.getName(), t);
        log.error("Find in index failed: " + is.toString());
        idxValidationRes.reportIssue(is);
        enoughIssues = true;
    }
    final boolean skipConditions = checkFirst > 0 || checkThrough > 0;
    final boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0;
    long current = 0;
    long processedNumber = 0;
    KeyCacheObject previousKey = null;
    while (!enoughIssues && !validateCtx.isCancelled()) {
        KeyCacheObject h2key = null;
        try {
            try {
                if (!cursor.next())
                    break;
            } catch (DbException e) {
                if (X.hasCause(e, CorruptedTreeException.class))
                    throw new IgniteCheckedException("Key is present in SQL index, but is missing in corresponding " + "data page. Previous successfully read key: " + CacheObjectUtils.unwrapBinaryIfNeeded(ctx.cacheObjectContext(), previousKey, true, true), X.cause(e, CorruptedTreeException.class));
                throw e;
            }
            H2CacheRow h2Row = (H2CacheRow) cursor.get();
            if (skipConditions) {
                if (bothSkipConditions) {
                    if (processedNumber > checkFirst)
                        break;
                    else if (current++ % checkThrough > 0)
                        continue;
                    else
                        processedNumber++;
                } else {
                    if (checkFirst > 0) {
                        if (current++ > checkFirst)
                            break;
                    } else {
                        if (current++ % checkThrough > 0)
                            continue;
                    }
                }
            }
            h2key = h2Row.key();
            if (h2Row.link() != 0L) {
                CacheDataRow cacheDataStoreRow = ctx.group().offheap().read(ctx, h2key);
                if (cacheDataStoreRow == null)
                    throw new IgniteCheckedException("Key is present in SQL index, but can't be found in CacheDataTree.");
            } else
                throw new IgniteCheckedException("Invalid index row, possibly deleted " + h2Row);
        } catch (Throwable t) {
            Object o = CacheObjectUtils.unwrapBinaryIfNeeded(ctx.cacheObjectContext(), h2key, true, true);
            IndexValidationIssue is = new IndexValidationIssue(String.valueOf(o), ctx.name(), idx.getName(), t);
            log.error("Failed to lookup key: " + is.toString());
            enoughIssues |= idxValidationRes.reportIssue(is);
        } finally {
            previousKey = h2key;
        }
    }
    CacheGroupContext group = ctx.group();
    String uniqueIdxName = String.format("[cacheGroup=%s, cacheGroupId=%s, cache=%s, cacheId=%s, idx=%s]", group.name(), group.groupId(), ctx.name(), ctx.cacheId(), idx.getName());
    idleChecker.apply(group.groupId());
    processedIndexes.incrementAndGet();
    printProgressOfIndexValidationIfNeeded();
    return Collections.singletonMap(uniqueIdxName, idxValidationRes);
}
Also used: CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) Index(org.h2.index.Index) H2CacheRow(org.apache.ignite.internal.processors.query.h2.opt.H2CacheRow) Cursor(org.h2.index.Cursor) DbException(org.h2.message.DbException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) CorruptedTreeException(org.apache.ignite.internal.processors.cache.persistence.tree.CorruptedTreeException) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) CacheGroupContext(org.apache.ignite.internal.processors.cache.CacheGroupContext) Session(org.h2.engine.Session)
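
The checkFirst/checkThrough branching above decides which index rows receive the full cache lookup, and it is easy to misread. The standalone sketch below (not Ignite API) reproduces that sampling arithmetic over plain row positions so the effect of the two parameters can be seen directly.

import java.util.ArrayList;
import java.util.List;

/** Standalone sketch of the checkFirst/checkThrough sampling used in processIndex. */
public class IndexSamplingSketch {
    /** @return positions of the rows that would be fully validated. */
    static List<Long> sampledRows(long totalRows, int checkFirst, int checkThrough) {
        List<Long> checked = new ArrayList<>();

        boolean skipConditions = checkFirst > 0 || checkThrough > 0;
        boolean bothSkipConditions = checkFirst > 0 && checkThrough > 0;

        long current = 0;
        long processedNumber = 0;

        for (long row = 0; row < totalRows; row++) {
            if (skipConditions) {
                if (bothSkipConditions) {
                    if (processedNumber > checkFirst)
                        break;
                    else if (current++ % checkThrough > 0)
                        continue;
                    else
                        processedNumber++;
                }
                else {
                    if (checkFirst > 0) {
                        if (current++ > checkFirst)
                            break;
                    }
                    else {
                        if (current++ % checkThrough > 0)
                            continue;
                    }
                }
            }

            checked.add(row); // this row gets the full cache data lookup
        }

        return checked;
    }

    public static void main(String[] args) {
        // checkFirst=3: only the leading rows are validated -> [0, 1, 2, 3].
        System.out.println(sampledRows(10, 3, 0));

        // checkThrough=3: every third row is validated -> [0, 3, 6, 9].
        System.out.println(sampledRows(10, 0, 3));
    }
}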

Example 24 with AND

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND in project ignite by apache.

The class ValidateIndexesClosure, method mvccSession.

/**
 * Get session with MVCC snapshot and QueryContext.
 *
 * @param cctx Cache context.
 * @return Session with QueryContext and MVCC snapshot, or {@code null} if MVCC is not enabled for the cache.
 * @throws IgniteCheckedException If failed.
 */
private Session mvccSession(GridCacheContext<?, ?> cctx) throws IgniteCheckedException {
    Session session = null;
    boolean mvccEnabled = cctx.mvccEnabled();
    if (mvccEnabled) {
        ConnectionManager connMgr = ((IgniteH2Indexing) ignite.context().query().getIndexing()).connections();
        JdbcConnection connection = (JdbcConnection) connMgr.connection().connection();
        session = (Session) connection.getSession();
        MvccQueryTracker tracker = MvccUtils.mvccTracker(cctx, true);
        MvccSnapshot mvccSnapshot = tracker.snapshot();
        final QueryContext qctx = new QueryContext(0, cacheName -> null, null, mvccSnapshot, null, true);
        session.setVariable(H2Utils.QCTX_VARIABLE_NAME, new H2Utils.ValueRuntimeSimpleObject<>(qctx));
    }
    return session;
}
Also used: H2Utils(org.apache.ignite.internal.processors.query.h2.H2Utils) MvccQueryTracker(org.apache.ignite.internal.processors.cache.mvcc.MvccQueryTracker) MvccSnapshot(org.apache.ignite.internal.processors.cache.mvcc.MvccSnapshot) ConnectionManager(org.apache.ignite.internal.processors.query.h2.ConnectionManager) JdbcConnection(org.h2.jdbc.JdbcConnection) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) IgniteH2Indexing(org.apache.ignite.internal.processors.query.h2.IgniteH2Indexing) Session(org.h2.engine.Session)
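
cctx.mvccEnabled() gates the whole method: for non-MVCC caches it simply returns null and the caller iterates the index without a snapshot. As a rough, assumption-level illustration (not part of the closure itself), MVCC in this code base is tied to the TRANSACTIONAL_SNAPSHOT atomicity mode, so only caches configured like the sketch below would get a session carrying an MVCC snapshot.

import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;

public class MvccCacheConfigSketch {
    public static void main(String[] args) {
        // A cache configured this way is expected to report cctx.mvccEnabled() == true,
        // which is the branch where mvccSession() builds the snapshot-carrying session.
        CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("person");
        ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);

        System.out.println(ccfg.getName() + " atomicity: " + ccfg.getAtomicityMode());
    }
}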

Example 25 with AND

Use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.AND in project ignite by apache.

The class QueryParser, method parseH2.

/**
 * Parse and split query if needed, cache either two-step query or statement.
 *
 * @param schemaName Schema name.
 * @param qry Query.
 * @param batched Batched flag.
 * @param remainingAllowed Whether multiple statements are allowed.
 * @return Parsing result.
 */
@SuppressWarnings("IfMayBeConditional")
private QueryParserResult parseH2(String schemaName, SqlFieldsQuery qry, boolean batched, boolean remainingAllowed) {
    try (H2PooledConnection c = connMgr.connection(schemaName)) {
        // For queries that are explicitly local, we rely on the flag specified in the query
        // because this parsing result will be cached and used for queries directly.
        // For other queries, we enforce join order at this stage to avoid premature optimizations
        // (and therefore longer parsing) as long as there'll be more parsing at split stage.
        boolean enforceJoinOrderOnParsing = (!qry.isLocal() || qry.isEnforceJoinOrder());
        QueryContext qctx = QueryContext.parseContext(idx.backupFilter(null, null), qry.isLocal());
        H2Utils.setupConnection(c, qctx, false, enforceJoinOrderOnParsing, false);
        PreparedStatement stmt = null;
        try {
            stmt = c.prepareStatementNoCache(qry.getSql());
            if (qry.isLocal() && GridSqlQueryParser.checkMultipleStatements(stmt))
                throw new IgniteSQLException("Multiple statements queries are not supported for local queries.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            GridSqlQueryParser.PreparedWithRemaining prep = GridSqlQueryParser.preparedWithRemaining(stmt);
            Prepared prepared = prep.prepared();
            if (GridSqlQueryParser.isExplainUpdate(prepared))
                throw new IgniteSQLException("Explains of update queries are not supported.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            // Get remaining query and check if it is allowed.
            SqlFieldsQuery remainingQry = null;
            if (!F.isEmpty(prep.remainingSql())) {
                checkRemainingAllowed(remainingAllowed);
                remainingQry = cloneFieldsQuery(qry).setSql(prep.remainingSql());
            }
            // Prepare new query.
            SqlFieldsQuery newQry = cloneFieldsQuery(qry).setSql(prepared.getSQL());
            final int paramsCnt = prepared.getParameters().size();
            Object[] argsOrig = qry.getArgs();
            Object[] args = null;
            Object[] remainingArgs = null;
            if (!batched && paramsCnt > 0) {
                if (argsOrig == null || argsOrig.length < paramsCnt)
                    // Not enough parameters, but we will handle this later on execution phase.
                    args = argsOrig;
                else {
                    args = Arrays.copyOfRange(argsOrig, 0, paramsCnt);
                    if (paramsCnt != argsOrig.length)
                        remainingArgs = Arrays.copyOfRange(argsOrig, paramsCnt, argsOrig.length);
                }
            } else
                remainingArgs = argsOrig;
            newQry.setArgs(args);
            QueryDescriptor newQryDesc = queryDescriptor(schemaName, newQry);
            if (remainingQry != null)
                remainingQry.setArgs(remainingArgs);
            final List<JdbcParameterMeta> paramsMeta;
            try {
                paramsMeta = H2Utils.parametersMeta(stmt.getParameterMetaData());
                assert prepared.getParameters().size() == paramsMeta.size();
            } catch (IgniteCheckedException | SQLException e) {
                throw new IgniteSQLException("Failed to get parameters metadata", IgniteQueryErrorCode.UNKNOWN, e);
            }
            // Do actual parsing.
            if (CommandProcessor.isCommand(prepared)) {
                GridSqlStatement cmdH2 = new GridSqlQueryParser(false, log).parse(prepared);
                QueryParserResultCommand cmd = new QueryParserResultCommand(null, cmdH2, false);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, null, cmd);
            } else if (CommandProcessor.isCommandNoOp(prepared)) {
                QueryParserResultCommand cmd = new QueryParserResultCommand(null, null, true);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, null, cmd);
            } else if (GridSqlQueryParser.isDml(prepared)) {
                QueryParserResultDml dml = prepareDmlStatement(newQryDesc, prepared);
                return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, null, dml, null);
            } else if (!prepared.isQuery()) {
                throw new IgniteSQLException("Unsupported statement: " + newQry.getSql(), IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
            }
            // Parse SELECT.
            GridSqlQueryParser parser = new GridSqlQueryParser(false, log);
            GridSqlQuery selectStmt = (GridSqlQuery) parser.parse(prepared);
            List<Integer> cacheIds = parser.cacheIds();
            Integer mvccCacheId = mvccCacheIdForSelect(parser.objectsMap());
            // Calculate whether the query can in fact be executed locally.
            boolean loc = qry.isLocal();
            if (!loc) {
                if (parser.isLocalQuery())
                    loc = true;
            }
            // If this is a local query, check if it must be split.
            boolean locSplit = false;
            if (loc) {
                GridCacheContext cctx = parser.getFirstPartitionedCache();
                if (cctx != null && cctx.config().getQueryParallelism() > 1)
                    locSplit = true;
            }
            // Split is required either if the query is distributed, or when it is local but executed
            // over a segmented PARTITIONED cache. In that case multiple map queries are executed against local
            // node stripes in parallel and then merged through the reduce process.
            boolean splitNeeded = !loc || locSplit;
            String forUpdateQryOutTx = null;
            String forUpdateQryTx = null;
            GridCacheTwoStepQuery forUpdateTwoStepQry = null;
            boolean forUpdate = GridSqlQueryParser.isForUpdateQuery(prepared);
            // For SELECT FOR UPDATE we add the _KEY column to be able to lock selected rows further.
            if (forUpdate) {
                // We have checked above that it's not a UNION query, so it's got to be a SELECT.
                assert selectStmt instanceof GridSqlSelect;
                // Check FOR UPDATE invariants: only one table, MVCC is there.
                if (cacheIds.size() != 1)
                    throw new IgniteSQLException("SELECT FOR UPDATE is supported only for queries " + "that involve single transactional cache.");
                if (mvccCacheId == null)
                    throw new IgniteSQLException("SELECT FOR UPDATE query requires transactional cache " + "with MVCC enabled.", IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                // We need a copy because we are going to modify AST a bit. We do not want to modify original select.
                GridSqlSelect selForUpdate = ((GridSqlSelect) selectStmt).copySelectForUpdate();
                // Clear forUpdate flag to run it as a plain query.
                selForUpdate.forUpdate(false);
                ((GridSqlSelect) selectStmt).forUpdate(false);
                // Remember sql string without FOR UPDATE clause.
                forUpdateQryOutTx = selForUpdate.getSQL();
                GridSqlAlias keyCol = keyColumn(selForUpdate);
                selForUpdate.addColumn(keyCol, true);
                // Remember sql string without FOR UPDATE clause and with _key column.
                forUpdateQryTx = selForUpdate.getSQL();
                // Prepare additional two-step query for FOR UPDATE case.
                if (splitNeeded) {
                    c.schema(newQry.getSchema());
                    forUpdateTwoStepQry = GridSqlQuerySplitter.split(c, selForUpdate, forUpdateQryTx, newQry.isCollocated(), newQry.isDistributedJoins(), newQry.isEnforceJoinOrder(), locSplit, idx, paramsCnt, log);
                }
            }
            GridCacheTwoStepQuery twoStepQry = null;
            if (splitNeeded) {
                GridSubqueryJoinOptimizer.pullOutSubQueries(selectStmt);
                c.schema(newQry.getSchema());
                twoStepQry = GridSqlQuerySplitter.split(c, selectStmt, newQry.getSql(), newQry.isCollocated(), newQry.isDistributedJoins(), newQry.isEnforceJoinOrder(), locSplit, idx, paramsCnt, log);
            }
            List<GridQueryFieldMetadata> meta = H2Utils.meta(stmt.getMetaData());
            QueryParserResultSelect select = new QueryParserResultSelect(selectStmt, twoStepQry, forUpdateTwoStepQry, meta, cacheIds, mvccCacheId, forUpdateQryOutTx, forUpdateQryTx);
            return new QueryParserResult(newQryDesc, queryParameters(newQry), remainingQry, paramsMeta, select, null, null);
        } catch (IgniteCheckedException | SQLException e) {
            throw new IgniteSQLException("Failed to parse query. " + e.getMessage(), IgniteQueryErrorCode.PARSING, e);
        } finally {
            U.close(stmt, log);
        }
    }
}
Also used: GridSqlAlias(org.apache.ignite.internal.processors.query.h2.sql.GridSqlAlias) SQLException(java.sql.SQLException) IgniteSQLException(org.apache.ignite.internal.processors.query.IgniteSQLException) GridSqlStatement(org.apache.ignite.internal.processors.query.h2.sql.GridSqlStatement) Prepared(org.h2.command.Prepared) GridCacheTwoStepQuery(org.apache.ignite.internal.processors.cache.query.GridCacheTwoStepQuery) GridQueryFieldMetadata(org.apache.ignite.internal.processors.query.GridQueryFieldMetadata) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) JdbcParameterMeta(org.apache.ignite.internal.processors.odbc.jdbc.JdbcParameterMeta) PreparedStatement(java.sql.PreparedStatement) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) GridSqlSelect(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSelect) SqlFieldsQuery(org.apache.ignite.cache.query.SqlFieldsQuery) GridSqlQuery(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQuery) GridSqlQueryParser(org.apache.ignite.internal.processors.query.h2.sql.GridSqlQueryParser)
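
One detail worth isolating is how query arguments are divided between the first parsed statement and the remaining SQL (the copyOfRange branch above). The standalone sketch below mirrors that arithmetic with made-up argument values; paramsCnt stands in for prepared.getParameters().size().

import java.util.Arrays;

/** Standalone sketch of the argument split between the first statement and the remaining SQL. */
public class ArgsSplitSketch {
    public static void main(String[] args) {
        Object[] argsOrig = {1, "Bob", 2, "Alice"};
        int paramsCnt = 2; // parameters consumed by the first statement

        Object[] firstStmtArgs = Arrays.copyOfRange(argsOrig, 0, paramsCnt);
        Object[] remainingArgs = paramsCnt != argsOrig.length
            ? Arrays.copyOfRange(argsOrig, paramsCnt, argsOrig.length)
            : null;

        System.out.println(Arrays.toString(firstStmtArgs)); // [1, Bob]
        System.out.println(Arrays.toString(remainingArgs)); // [2, Alice]
    }
}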

Aggregations

IgniteCheckedException (org.apache.ignite.IgniteCheckedException): 37 usages
IgniteSQLException (org.apache.ignite.internal.processors.query.IgniteSQLException): 33 usages
ArrayList (java.util.ArrayList): 26 usages
GridH2Table (org.apache.ignite.internal.processors.query.h2.opt.GridH2Table): 25 usages
List (java.util.List): 22 usages
IgniteException (org.apache.ignite.IgniteException): 21 usages
SQLException (java.sql.SQLException): 15 usages
GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext): 15 usages
GridH2RowDescriptor (org.apache.ignite.internal.processors.query.h2.opt.GridH2RowDescriptor): 13 usages
HashMap (java.util.HashMap): 12 usages
Column (org.h2.table.Column): 12 usages
LinkedHashMap (java.util.LinkedHashMap): 11 usages
GridQueryTypeDescriptor (org.apache.ignite.internal.processors.query.GridQueryTypeDescriptor): 11 usages
Index (org.h2.index.Index): 11 usages
PreparedStatement (java.sql.PreparedStatement): 9 usages
SqlFieldsQuery (org.apache.ignite.cache.query.SqlFieldsQuery): 9 usages
GridQueryProperty (org.apache.ignite.internal.processors.query.GridQueryProperty): 9 usages
UpdatePlan (org.apache.ignite.internal.processors.query.h2.dml.UpdatePlan): 9 usages
GridSqlColumn (org.apache.ignite.internal.processors.query.h2.sql.GridSqlColumn): 9 usages
GridSqlElement (org.apache.ignite.internal.processors.query.h2.sql.GridSqlElement): 9 usages