Example 16 with IN

use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

the class GridIndexRebuildSelfTest method checkCntThreadForRebuildIdx.

/**
 * Checks that index rebuild uses the number of threads
 * specified in the configuration.
 *
 * @param buildIdxThreadCnt Thread pool size for index build
 *      after node restart.
 * @throws Exception If failed.
 */
private void checkCntThreadForRebuildIdx(int buildIdxThreadCnt) throws Exception {
    blkIndexingCls = null;
    IgniteEx srv = startServer();
    IgniteInternalCache internalCache = createAndFillTableWithIndex(srv);
    int partCnt = internalCache.configuration().getAffinity().partitions();
    assertTrue(partCnt > buildIdxThreadCnt);
    File idxPath = indexFile(internalCache);
    stopAllGrids();
    assertTrue(delete(idxPath));
    buildIdxThreadPoolSize = buildIdxThreadCnt;
    srv = startServer();
    srv.cache(CACHE_NAME).indexReadyFuture().get();
    ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
    long buildIdxRunnerCnt = LongStream.of(threadMXBean.getAllThreadIds())
        .mapToObj(threadMXBean::getThreadInfo)
        .filter(threadInfo -> threadInfo.getThreadName().startsWith("build-idx-runner"))
        .count();
    assertEquals(buildIdxThreadCnt, buildIdxRunnerCnt);
}
Also used : IndexRebuildCancelToken(org.apache.ignite.internal.processors.query.schema.IndexRebuildCancelToken) GridFutureAdapter(org.apache.ignite.internal.util.future.GridFutureAdapter) U(org.apache.ignite.internal.util.typedef.internal.U) IgniteEx(org.apache.ignite.internal.IgniteEx) GridCursor(org.apache.ignite.internal.util.lang.GridCursor) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) DynamicIndexAbstractSelfTest(org.apache.ignite.internal.processors.cache.index.DynamicIndexAbstractSelfTest) SchemaIndexCacheVisitorClosure(org.apache.ignite.internal.processors.query.schema.SchemaIndexCacheVisitorClosure) Objects.requireNonNull(java.util.Objects.requireNonNull) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) ManagementFactory(java.lang.management.ManagementFactory) IgniteInternalCache(org.apache.ignite.internal.processors.cache.IgniteInternalCache) IndexesRebuildTask(org.apache.ignite.internal.managers.indexing.IndexesRebuildTask) IndexProcessor(org.apache.ignite.internal.cache.query.index.IndexProcessor) LongStream(java.util.stream.LongStream) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) FilePageStoreManager(org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager) Test(org.junit.Test) ThreadMXBean(java.lang.management.ThreadMXBean) Ignite(org.apache.ignite.Ignite) CacheDataRow(org.apache.ignite.internal.processors.cache.persistence.CacheDataRow) File(java.io.File) IgniteCache(org.apache.ignite.IgniteCache) IgniteCacheOffheapManager(org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager) CountDownLatch(java.util.concurrent.CountDownLatch) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) INDEX_FILE_NAME(org.apache.ignite.internal.processors.cache.persistence.file.FilePageStoreManager.INDEX_FILE_NAME) DFLT_SCHEMA(org.apache.ignite.internal.processors.query.QueryUtils.DFLT_SCHEMA) IgniteUtils.delete(org.apache.ignite.internal.util.IgniteUtils.delete) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) Objects.nonNull(java.util.Objects.nonNull)
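
The assertion above hinges on counting live JVM threads by name prefix through the JMX thread bean. A minimal standalone sketch of that counting technique follows; the pool name "build-idx-runner" is taken from the test, while the executor and thread factory here are stand-ins used only to create similarly named threads:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.LongStream;

public class ThreadCountByPrefix {
    public static void main(String[] args) throws Exception {
        // Stand-in pool whose threads mimic Ignite's "build-idx-runner" naming.
        AtomicInteger idx = new AtomicInteger();
        ExecutorService pool = Executors.newFixedThreadPool(4,
            r -> new Thread(r, "build-idx-runner-" + idx.getAndIncrement()));

        // A fixed pool creates one worker per submitted task until core size is reached.
        for (int i = 0; i < 4; i++)
            pool.submit(() -> { });
        Thread.sleep(100);

        ThreadMXBean bean = ManagementFactory.getThreadMXBean();

        // Same pattern as the test: thread ids -> ThreadInfo -> filter by name prefix.
        long cnt = LongStream.of(bean.getAllThreadIds())
            .mapToObj(bean::getThreadInfo)
            .filter(ti -> ti != null && ti.getThreadName().startsWith("build-idx-runner"))
            .count();

        System.out.println("build-idx-runner threads: " + cnt);
        pool.shutdown();
    }
}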

Example 17 with IN

use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

the class GridReduceQueryExecutor method update.

/**
 * @param schemaName Schema name.
 * @param cacheIds Cache ids.
 * @param selectQry Select query.
 * @param params SQL parameters.
 * @param enforceJoinOrder Enforce join order of tables.
 * @param pageSize Page size.
 * @param timeoutMillis Timeout.
 * @param parts Partitions.
 * @param isReplicatedOnly Whether query uses only replicated caches.
 * @param cancel Cancel state.
 * @return Update result, or {@code null} when some map node doesn't support distributed DML.
 */
@SuppressWarnings("IfMayBeConditional")
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();
    ReducePartitionMapResult nodesParts = mapper.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
    Collection<ClusterNode> nodes = nodesParts.nodes();
    if (F.isEmpty(nodes))
        throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();
        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
            return null;
        }
    }
    final long reqId = qryReqIdGen.incrementAndGet();
    h2.runningQueryManager().trackRequestId(reqId);
    final DmlDistributedUpdateRun r = new DmlDistributedUpdateRun(nodes.size());
    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;
    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .explicitTimeout(true)
        .flags(flags);
    updRuns.put(reqId, r);
    boolean release = false;
    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
        ReducePartitionsSpecializer partsSpec = (parts == null) ? null : new ReducePartitionsSpecializer(partsMap);
        final Collection<ClusterNode> finalNodes = nodes;
        cancel.add(() -> {
            r.future().onCancelled();
            send(finalNodes, new GridQueryCancelRequest(reqId), null, true);
        });
        // send() logs the debug message
        if (send(nodes, req, partsSpec, false))
            return r.future().get();
        throw new CacheException("Failed to send update request to participating nodes.");
    } catch (IgniteCheckedException | RuntimeException e) {
        release = true;
        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
        throw new CacheException("Failed to run SQL update query. " + e.getMessage(), e);
    } finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);
        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) GridH2DmlRequest(org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest) GridQueryCancelRequest(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryCancelRequest) AffinityTopologyVersion(org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion) CacheException(javax.cache.CacheException) DmlDistributedUpdateRun(org.apache.ignite.internal.processors.query.h2.dml.DmlDistributedUpdateRun) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IntArray(org.h2.util.IntArray)
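
A detail worth noting in this method is how in-flight runs are tracked: updRuns.put(reqId, r) before sending, then the two-argument ConcurrentMap.remove(reqId, r) in the finally block, which only removes the entry if it still maps to this particular run. A minimal sketch of the same bookkeeping, with RequestRun as a hypothetical stand-in for DmlDistributedUpdateRun:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

public class InFlightTracker {
    /** Hypothetical stand-in for DmlDistributedUpdateRun. */
    static class RequestRun {
        final long reqId;
        RequestRun(long reqId) { this.reqId = reqId; }
    }

    private final AtomicLong reqIdGen = new AtomicLong();
    private final ConcurrentMap<Long, RequestRun> runs = new ConcurrentHashMap<>();

    public void execute() {
        long reqId = reqIdGen.incrementAndGet();
        RequestRun r = new RequestRun(reqId);

        runs.put(reqId, r);

        try {
            // ... send the request and await the result ...
        } finally {
            // Two-arg remove: succeeds only if the mapping still points at *our* run,
            // so a racing cleanup path cannot silently evict someone else's entry.
            if (!runs.remove(reqId, r))
                System.err.println("Update run was already removed: " + reqId);
        }
    }
}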

Example 18 with IN

use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

the class GridReduceQueryExecutor method createMergeTable.

/**
 * @param conn Connection.
 * @param qry Query.
 * @param explain Explain.
 * @return Table.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private ReduceTable createMergeTable(H2PooledConnection conn, GridCacheSqlQuery qry, boolean explain) throws IgniteCheckedException {
    try {
        Session ses = H2Utils.session(conn);
        CreateTableData data = new CreateTableData();
        data.tableName = "T___";
        data.schema = ses.getDatabase().getSchema(ses.getCurrentSchemaName());
        data.create = true;
        if (!explain) {
            LinkedHashMap<String, ?> colsMap = qry.columns();
            assert colsMap != null;
            ArrayList<Column> cols = new ArrayList<>(colsMap.size());
            for (Map.Entry<String, ?> e : colsMap.entrySet()) {
                String alias = e.getKey();
                GridSqlType type = (GridSqlType) e.getValue();
                assert !F.isEmpty(alias);
                Column col0;
                if (type == GridSqlType.UNKNOWN) {
                    // Special case for parameter being set at the top of the query (e.g. SELECT ? FROM ...).
                    // Re-map it to STRING in the same way it is done in H2, because any argument can be cast
                    // to string.
                    col0 = new Column(alias, Value.STRING);
                } else {
                    col0 = new Column(alias, type.type(), type.precision(), type.scale(), type.displaySize());
                }
                cols.add(col0);
            }
            data.columns = cols;
        } else
            data.columns = planColumns();
        boolean sortedIndex = !F.isEmpty(qry.sortColumns());
        ReduceTable tbl = new ReduceTable(data);
        ArrayList<Index> idxs = new ArrayList<>(2);
        if (explain) {
            idxs.add(new UnsortedReduceIndexAdapter(ctx, tbl, sortedIndex ? MERGE_INDEX_SORTED : MERGE_INDEX_UNSORTED));
        } else if (sortedIndex) {
            List<GridSqlSortColumn> sortCols = (List<GridSqlSortColumn>) qry.sortColumns();
            SortedReduceIndexAdapter sortedMergeIdx = new SortedReduceIndexAdapter(ctx, tbl, MERGE_INDEX_SORTED, GridSqlSortColumn.toIndexColumns(tbl, sortCols));
            idxs.add(ReduceTable.createScanIndex(sortedMergeIdx));
            idxs.add(sortedMergeIdx);
        } else
            idxs.add(new UnsortedReduceIndexAdapter(ctx, tbl, MERGE_INDEX_UNSORTED));
        tbl.indexes(idxs);
        return tbl;
    } catch (Exception e) {
        throw new IgniteCheckedException(e);
    }
}
Also used : ArrayList(java.util.ArrayList) Index(org.h2.index.Index) CreateTableData(org.h2.command.ddl.CreateTableData) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException) IgniteClientDisconnectedException(org.apache.ignite.IgniteClientDisconnectedException) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) IgniteException(org.apache.ignite.IgniteException) TransactionAlreadyCompletedException(org.apache.ignite.transactions.TransactionAlreadyCompletedException) IgniteTxAlreadyCompletedCheckedException(org.apache.ignite.internal.transactions.IgniteTxAlreadyCompletedCheckedException) QueryRetryException(org.apache.ignite.cache.query.QueryRetryException) SQLException(java.sql.SQLException) IgniteInterruptedCheckedException(org.apache.ignite.internal.IgniteInterruptedCheckedException) CacheException(javax.cache.CacheException) TransactionException(org.apache.ignite.transactions.TransactionException) GridSqlSortColumn(org.apache.ignite.internal.processors.query.h2.sql.GridSqlSortColumn) Column(org.h2.table.Column) GridSqlType(org.apache.ignite.internal.processors.query.h2.sql.GridSqlType) Collections.singletonList(java.util.Collections.singletonList) List(java.util.List) Map(java.util.Map) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) ConcurrentMap(java.util.concurrent.ConcurrentMap) LinkedHashMap(java.util.LinkedHashMap) Collections.singletonMap(java.util.Collections.singletonMap) Session(org.h2.engine.Session)
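
The reduce phase materializes map results into a local H2 merge table whose columns come from the query metadata; qry.columns() is a LinkedHashMap precisely so that generated columns preserve the SELECT-list order. Outside Ignite's internal Table API, the same idea can be sketched with plain H2 JDBC (this assumes the H2 driver is on the classpath; the table and column names are illustrative, not Ignite's):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;
import java.util.LinkedHashMap;
import java.util.stream.Collectors;

public class MergeTableSketch {
    public static void main(String[] args) throws Exception {
        // LinkedHashMap keeps insertion order, so generated columns match the SELECT list.
        LinkedHashMap<String, String> cols = new LinkedHashMap<>();
        cols.put("ID", "BIGINT");
        cols.put("NAME", "VARCHAR(100)");
        // Unknown types (e.g. SELECT ? FROM ...) fall back to a string type,
        // mirroring the GridSqlType.UNKNOWN -> Value.STRING special case above.
        cols.put("PARAM0", "VARCHAR");

        String ddl = cols.entrySet().stream()
            .map(e -> e.getKey() + " " + e.getValue())
            .collect(Collectors.joining(", ", "CREATE LOCAL TEMPORARY TABLE T___ (", ")"));

        try (Connection conn = DriverManager.getConnection("jdbc:h2:mem:");
             Statement stmt = conn.createStatement()) {
            stmt.execute(ddl);
            System.out.println("Created: " + ddl);
        }
    }
}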

Example 19 with IN

use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

the class GridMapQueryExecutor method onQueryRequest0.

/**
 * @param node Node that authored the request.
 * @param qryId Query ID.
 * @param reqId Request ID.
 * @param segmentId Index segment ID.
 * @param schemaName Schema name.
 * @param qrys Queries to execute.
 * @param cacheIds Caches which will be affected by these queries.
 * @param topVer Topology version.
 * @param partsMap Partitions map for unstable topology.
 * @param parts Explicit partitions for current node.
 * @param pageSize Page size.
 * @param distributedJoins Query distributed join mode.
 * @param enforceJoinOrder Enforce join order H2 flag.
 * @param replicated Replicated only flag.
 * @param timeout Query timeout.
 * @param params Query parameters.
 * @param lazy Lazy execution flag.
 * @param mvccSnapshot MVCC snapshot.
 * @param dataPageScanEnabled If data page scan is enabled.
 * @param treatReplicatedAsPartitioned Whether to treat replicated caches as partitioned.
 */
private void onQueryRequest0(final ClusterNode node, final long qryId, final long reqId, final int segmentId,
    final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts, final int pageSize,
    final boolean distributedJoins, final boolean enforceJoinOrder, final boolean replicated, final int timeout,
    final Object[] params, boolean lazy, @Nullable final MvccSnapshot mvccSnapshot, Boolean dataPageScanEnabled,
    boolean treatReplicatedAsPartitioned) {
    boolean performanceStatsEnabled = ctx.performanceStatistics().enabled();
    if (performanceStatsEnabled)
        IoStatisticsQueryHelper.startGatheringQueryStatistics();
    // Prepare to run queries.
    GridCacheContext<?, ?> mainCctx = mainCacheContext(cacheIds);
    MapNodeResults nodeRess = resultsForNode(node.id());
    MapQueryResults qryResults = null;
    PartitionReservation reserved = null;
    QueryContext qctx = null;
    // We don't use try with resources on purpose - the catch block must also be executed in the context of this span.
    TraceSurroundings trace = MTC.support(ctx.tracing().create(SQL_QRY_EXEC_REQ, MTC.span())
        .addTag(SQL_QRY_TEXT, () -> qrys.stream().map(GridCacheSqlQuery::query).collect(Collectors.joining("; "))));
    try {
        if (topVer != null) {
            // Reserve primary for topology version or explicit partitions.
            reserved = h2.partitionReservationManager().reservePartitions(cacheIds, topVer, parts, node.id(), reqId);
            if (reserved.failed()) {
                sendRetry(node, reqId, segmentId, reserved.error());
                return;
            }
        }
        // Prepare query context.
        DistributedJoinContext distributedJoinCtx = null;
        if (distributedJoins && !replicated) {
            distributedJoinCtx = new DistributedJoinContext(topVer, partsMap, node.id(), reqId, segmentId, pageSize);
        }
        qctx = new QueryContext(segmentId, h2.backupFilter(topVer, parts, treatReplicatedAsPartitioned), distributedJoinCtx, mvccSnapshot, reserved, true);
        qryResults = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, lazy, qctx);
        // qctx is set, we have to release reservations inside of it.
        reserved = null;
        if (distributedJoinCtx != null)
            qryCtxRegistry.setShared(node.id(), reqId, qctx);
        if (nodeRess.put(reqId, segmentId, qryResults) != null)
            throw new IllegalStateException();
        if (nodeRess.cancelled(reqId)) {
            qryCtxRegistry.clearShared(node.id(), reqId);
            nodeRess.cancelRequest(reqId);
            throw new QueryCancelledException();
        }
        // Run queries.
        int qryIdx = 0;
        boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
        for (GridCacheSqlQuery qry : qrys) {
            H2PooledConnection conn = h2.connections().connection(schemaName);
            H2Utils.setupConnection(conn, qctx, distributedJoins, enforceJoinOrder, lazy);
            MapQueryResult res = new MapQueryResult(h2, mainCctx, node.id(), qry, params, conn, log);
            qryResults.addResult(qryIdx, res);
            try {
                res.lock();
                // Ensure we are on the target node for this replicated query.
                if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
                    String sql = qry.query();
                    Collection<Object> params0 = F.asList(qry.parameters(params));
                    PreparedStatement stmt = conn.prepareStatement(sql, H2StatementCache.queryFlags(distributedJoins, enforceJoinOrder));
                    H2Utils.bindParameters(stmt, params0);
                    MapH2QueryInfo qryInfo = new MapH2QueryInfo(stmt, qry.query(), node.id(), qryId, reqId, segmentId);
                    ResultSet rs = h2.executeSqlQueryWithTimer(stmt, conn, sql, timeout, qryResults.queryCancel(qryIdx), dataPageScanEnabled, qryInfo);
                    if (evt) {
                        ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.",
                            EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null,
                            qry.query(), null, null, params, node.id(), null));
                    }
                    assert rs instanceof JdbcResultSet : rs.getClass();
                    if (qryResults.cancelled()) {
                        rs.close();
                        throw new QueryCancelledException();
                    }
                    res.openResult(rs, qryInfo);
                    final GridQueryNextPageResponse msg = prepareNextPage(nodeRess, node, qryResults, qryIdx, segmentId, pageSize, dataPageScanEnabled);
                    if (msg != null)
                        sendNextPage(node, msg);
                } else {
                    assert !qry.isPartitioned();
                    qryResults.closeResult(qryIdx);
                }
                qryIdx++;
            } finally {
                try {
                    res.unlockTables();
                } finally {
                    res.unlock();
                }
            }
        }
        if (!lazy)
            qryResults.releaseQueryContext();
    } catch (Throwable e) {
        if (qryResults != null) {
            nodeRess.remove(reqId, segmentId, qryResults);
            qryResults.close();
            // If a query is cancelled before execution starts, partitions have to be released.
            if (!lazy || !qryResults.isAllClosed())
                qryResults.releaseQueryContext();
        } else
            releaseReservations(qctx);
        if (e instanceof QueryCancelledException)
            sendError(node, reqId, e);
        else {
            SQLException sqlEx = X.cause(e, SQLException.class);
            if (sqlEx != null && sqlEx.getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED)
                sendQueryCancel(node, reqId);
            else {
                GridH2RetryException retryErr = X.cause(e, GridH2RetryException.class);
                if (retryErr != null) {
                    final String retryCause = String.format("Failed to execute non-collocated query (will retry) [localNodeId=%s, rmtNodeId=%s, reqId=%s, " + "errMsg=%s]", ctx.localNodeId(), node.id(), reqId, retryErr.getMessage());
                    sendRetry(node, reqId, segmentId, retryCause);
                } else {
                    QueryRetryException qryRetryErr = X.cause(e, QueryRetryException.class);
                    if (qryRetryErr != null)
                        sendError(node, reqId, qryRetryErr);
                    else {
                        if (e instanceof Error) {
                            U.error(log, "Failed to execute local query.", e);
                            throw (Error) e;
                        }
                        U.warn(log, "Failed to execute local query.", e);
                        sendError(node, reqId, e);
                    }
                }
            }
        }
    } finally {
        if (reserved != null)
            reserved.release();
        if (trace != null)
            trace.close();
        if (performanceStatsEnabled) {
            IoStatisticsHolder stat = IoStatisticsQueryHelper.finishGatheringQueryStatistics();
            if (stat.logicalReads() > 0 || stat.physicalReads() > 0) {
                ctx.performanceStatistics().queryReads(GridCacheQueryType.SQL_FIELDS, node.id(), reqId, stat.logicalReads(), stat.physicalReads());
            }
        }
    }
}
Also used : QueryRetryException(org.apache.ignite.cache.query.QueryRetryException) SQLException(java.sql.SQLException) GridQueryNextPageResponse(org.apache.ignite.internal.processors.query.h2.twostep.messages.GridQueryNextPageResponse) TraceSurroundings(org.apache.ignite.internal.processors.tracing.MTC.TraceSurroundings) DistributedJoinContext(org.apache.ignite.internal.processors.query.h2.opt.join.DistributedJoinContext) ResultSet(java.sql.ResultSet) JdbcResultSet(org.h2.jdbc.JdbcResultSet) GridCacheSqlQuery(org.apache.ignite.internal.processors.cache.query.GridCacheSqlQuery) H2PooledConnection(org.apache.ignite.internal.processors.query.h2.H2PooledConnection) MapH2QueryInfo(org.apache.ignite.internal.processors.query.h2.MapH2QueryInfo) GridH2RetryException(org.apache.ignite.internal.processors.query.h2.opt.GridH2RetryException) PreparedStatement(java.sql.PreparedStatement) QueryContext(org.apache.ignite.internal.processors.query.h2.opt.QueryContext) IoStatisticsHolder(org.apache.ignite.internal.metric.IoStatisticsHolder) QueryCancelledException(org.apache.ignite.cache.query.QueryCancelledException)
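
The error handling in the catch block repeatedly probes the cause chain with X.cause(e, SomeException.class) to pick the most specific reaction (cancel, retry, plain error). Without Ignite's X utility, the probe is a short generic walk over Throwable.getCause(); the sketch below is a simplified version of what X.cause provides, not the real implementation:

import java.sql.SQLException;

public class CauseChain {
    /** Returns the first cause of the given type in the chain, or null. */
    static <T extends Throwable> T cause(Throwable e, Class<T> cls) {
        for (Throwable t = e; t != null; t = t.getCause()) {
            if (cls.isInstance(t))
                return cls.cast(t);
        }
        return null;
    }

    public static void main(String[] args) {
        // A wrapped SQLException, as typically seen when H2 cancels a statement.
        Exception wrapped = new RuntimeException("outer",
            new IllegalStateException("mid", new SQLException("canceled", "57014", 57014)));

        SQLException sqlEx = cause(wrapped, SQLException.class);
        System.out.println(sqlEx != null ? "SQL error code: " + sqlEx.getErrorCode() : "no SQLException");
    }
}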

Example 20 with IN

use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.

the class IgniteStatisticsManagerImpl method processObsolescence.

/**
 * Save dirty obsolescence info to the local metastore; check whether statistics need to be refreshed and schedule the refresh.
 *
 * 1) Get all dirty partition statistics.
 * 2) Make a separate task for each key to avoid saving obsolescence info for a removed partition (race).
 * 3) Check whether each partition should be recollected and add it to the list in its table's task.
 * 4) Submit the tasks; the obsolescence info itself is stored during task processing.
 */
public synchronized void processObsolescence() {
    StatisticsUsageState usageState = usageState();
    if (usageState != ON || ctx.isStopping()) {
        if (log.isDebugEnabled())
            log.debug("Skipping obsolescence processing.");
        return;
    }
    if (log.isTraceEnabled())
        log.trace("Process statistics obsolescence started.");
    List<StatisticsKey> keys = statsRepos.getObsolescenceKeys();
    if (F.isEmpty(keys)) {
        if (log.isTraceEnabled())
            log.trace("No obsolescence info found. Finish obsolescence processing.");
        return;
    } else {
        if (log.isTraceEnabled())
            log.trace(String.format("Scheduling obsolescence savings for %d targets", keys.size()));
    }
    for (StatisticsKey key : keys) {
        StatisticsObjectConfiguration cfg = null;
        try {
            cfg = statCfgMgr.config(key);
        } catch (IgniteCheckedException e) {
            // No-op.
        }
        Set<Integer> tasksParts = calculateObsolescencedPartitions(cfg, statsRepos.getObsolescence(key));
        GridH2Table tbl = schemaMgr.dataTable(key.schema(), key.obj());
        if (tbl == null) {
            // Table may have been removed earlier but not yet processed, or something went wrong. Try to reschedule.
            if (log.isDebugEnabled())
                log.debug(String.format("Got obsolescence statistics for unknown table %s", key));
        }
        LocalStatisticsGatheringContext ctx = new LocalStatisticsGatheringContext(true, tbl, cfg, tasksParts, null);
        statProc.updateLocalStatistics(ctx);
    }
}
Also used : IgniteCheckedException(org.apache.ignite.IgniteCheckedException) GridH2Table(org.apache.ignite.internal.processors.query.h2.opt.GridH2Table) StatisticsObjectConfiguration(org.apache.ignite.internal.processors.query.stat.config.StatisticsObjectConfiguration)
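
Step 2 of the javadoc is the interesting part of this method: one task per statistics key, so that a key whose underlying object disappears mid-flight only affects its own task instead of poisoning a whole batch. A minimal sketch of that decomposition with a plain executor; StatKey is a hypothetical stand-in for Ignite's StatisticsKey, and the task body is a placeholder:

import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

public class PerKeyObsolescence {
    /** Hypothetical stand-in for Ignite's StatisticsKey. */
    record StatKey(String schema, String obj) { }

    public static void main(String[] args) {
        List<StatKey> dirtyKeys = List.of(new StatKey("PUBLIC", "PERSON"), new StatKey("PUBLIC", "ORG"));
        ExecutorService pool = Executors.newFixedThreadPool(2);

        // One task per key: if the object behind a key is removed concurrently,
        // only that task no-ops; the rest of the batch is unaffected.
        for (StatKey key : dirtyKeys) {
            pool.submit(() -> {
                // ... look up config; if the table is gone, log and skip (mirrors the tbl == null branch) ...
                System.out.println("Saving obsolescence info for " + key);
            });
        }

        pool.shutdown();
    }
}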
