use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
the class GridIndexRebuildSelfTest method checkCntThreadForRebuildIdx.
/**
* Check that index rebuild uses the number of threads
* specified in the configuration.
*
* @param buildIdxThreadCnt Index build thread pool size to use
* after node restart.
* @throws Exception If failed.
*/
private void checkCntThreadForRebuildIdx(int buildIdxThreadCnt) throws Exception {
blkIndexingCls = null;
IgniteEx srv = startServer();
IgniteInternalCache internalCache = createAndFillTableWithIndex(srv);
int partCnt = internalCache.configuration().getAffinity().partitions();
assertTrue(partCnt > buildIdxThreadCnt);
File idxPath = indexFile(internalCache);
stopAllGrids();
assertTrue(delete(idxPath));
buildIdxThreadPoolSize = buildIdxThreadCnt;
srv = startServer();
srv.cache(CACHE_NAME).indexReadyFuture().get();
ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
long buildIdxRunnerCnt = LongStream.of(threadMXBean.getAllThreadIds())
    .mapToObj(threadMXBean::getThreadInfo)
    .filter(threadInfo -> threadInfo.getThreadName().startsWith("build-idx-runner"))
    .count();
assertEquals(buildIdxThreadCnt, buildIdxRunnerCnt);
}
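The thread count assertion above relies only on standard JDK management APIs. A minimal, self-contained sketch of the same counting technique (the wrapper class and main method are illustrative; only the "build-idx-runner" thread name prefix comes from the test):

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadMXBean;
import java.util.stream.LongStream;

public class ThreadCountSketch {
    /** Counts live threads whose names start with the given prefix. */
    static long countThreadsByPrefix(String prefix) {
        ThreadMXBean bean = ManagementFactory.getThreadMXBean();

        return LongStream.of(bean.getAllThreadIds())
            .mapToObj(bean::getThreadInfo)
            .filter(info -> info != null && info.getThreadName().startsWith(prefix))
            .count();
    }

    public static void main(String[] args) {
        System.out.println("build-idx-runner threads: " + countThreadsByPrefix("build-idx-runner"));
    }
}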
use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
the class GridReduceQueryExecutor method update.
/**
* @param schemaName Schema name.
* @param cacheIds Cache ids.
* @param selectQry Select query.
* @param params SQL parameters.
* @param enforceJoinOrder Enforce join order of tables.
* @param pageSize Page size.
* @param timeoutMillis Timeout.
* @param parts Partitions.
* @param isReplicatedOnly Whether query uses only replicated caches.
* @param cancel Cancel state.
* @return Update result, or {@code null} when some map node doesn't support distributed DML.
*/
@SuppressWarnings("IfMayBeConditional")
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
AffinityTopologyVersion topVer = h2.readyTopologyVersion();
ReducePartitionMapResult nodesParts = mapper.nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);
Collection<ClusterNode> nodes = nodesParts.nodes();
if (F.isEmpty(nodes))
throw new CacheException("Failed to determine nodes participating in the update. " + "Explanation (Retry update once topology recovers).");
if (isReplicatedOnly) {
ClusterNode locNode = ctx.discovery().localNode();
if (nodes.contains(locNode))
nodes = singletonList(locNode);
else
nodes = singletonList(F.rand(nodes));
}
for (ClusterNode n : nodes) {
if (!n.version().greaterThanEqual(2, 3, 0)) {
log.warning("Server-side DML optimization is skipped because map node does not support it. " + "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");
return null;
}
}
final long reqId = qryReqIdGen.incrementAndGet();
h2.runningQueryManager().trackRequestId(reqId);
final DmlDistributedUpdateRun r = new DmlDistributedUpdateRun(nodes.size());
int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;
if (isReplicatedOnly)
flags |= GridH2QueryRequest.FLAG_REPLICATED;
GridH2DmlRequest req = new GridH2DmlRequest().requestId(reqId).topologyVersion(topVer).caches(cacheIds)
    .schemaName(schemaName).query(selectQry).pageSize(pageSize).parameters(params)
    .timeout(timeoutMillis).explicitTimeout(true).flags(flags);
updRuns.put(reqId, r);
boolean release = false;
try {
Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ? nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();
ReducePartitionsSpecializer partsSpec = (parts == null) ? null : new ReducePartitionsSpecializer(partsMap);
final Collection<ClusterNode> finalNodes = nodes;
cancel.add(() -> {
r.future().onCancelled();
send(finalNodes, new GridQueryCancelRequest(reqId), null, true);
});
// send() logs the debug message
if (send(nodes, req, partsSpec, false))
return r.future().get();
throw new CacheException("Failed to send update request to participating nodes.");
} catch (IgniteCheckedException | RuntimeException e) {
release = true;
U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);
throw new CacheException("Failed to run SQL update query. " + e.getMessage(), e);
} finally {
if (release)
send(nodes, new GridQueryCancelRequest(reqId), null, false);
if (!updRuns.remove(reqId, r))
U.warn(log, "Update run was already removed: " + reqId);
}
}
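The cancel.add(() -> ...) call above registers a closure that fails the update future and broadcasts a GridQueryCancelRequest when the query is cancelled. A minimal, self-contained sketch of that register-and-run-on-cancel pattern (this is not Ignite's GridQueryCancel API, only an illustration of the idea):

import java.util.ArrayList;
import java.util.List;

/** Sketch of the cancel-callback pattern: register closures, run them all once on cancel. */
public class CancelHandleSketch {
    private final List<Runnable> actions = new ArrayList<>();
    private boolean cancelled;

    /** Registers a cancel action; runs it immediately if cancellation already happened. */
    public synchronized void add(Runnable action) {
        if (cancelled)
            action.run();
        else
            actions.add(action);
    }

    /** Cancels: e.g. fail the update future and send cancel requests to map nodes. */
    public synchronized void cancel() {
        cancelled = true;
        actions.forEach(Runnable::run);
        actions.clear();
    }

    public static void main(String[] args) {
        CancelHandleSketch cancel = new CancelHandleSketch();
        cancel.add(() -> System.out.println("update future cancelled, cancel request sent"));
        cancel.cancel();
    }
}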
use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
the class GridReduceQueryExecutor method createMergeTable.
/**
* @param conn Connection.
* @param qry Query.
* @param explain Explain.
* @return Table.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings("unchecked")
private ReduceTable createMergeTable(H2PooledConnection conn, GridCacheSqlQuery qry, boolean explain) throws IgniteCheckedException {
try {
Session ses = H2Utils.session(conn);
CreateTableData data = new CreateTableData();
data.tableName = "T___";
data.schema = ses.getDatabase().getSchema(ses.getCurrentSchemaName());
data.create = true;
if (!explain) {
LinkedHashMap<String, ?> colsMap = qry.columns();
assert colsMap != null;
ArrayList<Column> cols = new ArrayList<>(colsMap.size());
for (Map.Entry<String, ?> e : colsMap.entrySet()) {
String alias = e.getKey();
GridSqlType type = (GridSqlType) e.getValue();
assert !F.isEmpty(alias);
Column col0;
if (type == GridSqlType.UNKNOWN) {
// Special case for parameter being set at the top of the query (e.g. SELECT ? FROM ...).
// Re-map it to STRING in the same way it is done in H2, because any argument can be cast
// to string.
col0 = new Column(alias, Value.STRING);
} else {
col0 = new Column(alias, type.type(), type.precision(), type.scale(), type.displaySize());
}
cols.add(col0);
}
data.columns = cols;
} else
data.columns = planColumns();
boolean sortedIndex = !F.isEmpty(qry.sortColumns());
ReduceTable tbl = new ReduceTable(data);
ArrayList<Index> idxs = new ArrayList<>(2);
if (explain) {
idxs.add(new UnsortedReduceIndexAdapter(ctx, tbl, sortedIndex ? MERGE_INDEX_SORTED : MERGE_INDEX_UNSORTED));
} else if (sortedIndex) {
List<GridSqlSortColumn> sortCols = (List<GridSqlSortColumn>) qry.sortColumns();
SortedReduceIndexAdapter sortedMergeIdx = new SortedReduceIndexAdapter(ctx, tbl, MERGE_INDEX_SORTED, GridSqlSortColumn.toIndexColumns(tbl, sortCols));
idxs.add(ReduceTable.createScanIndex(sortedMergeIdx));
idxs.add(sortedMergeIdx);
} else
idxs.add(new UnsortedReduceIndexAdapter(ctx, tbl, MERGE_INDEX_UNSORTED));
tbl.indexes(idxs);
return tbl;
} catch (Exception e) {
throw new IgniteCheckedException(e);
}
}
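The GridSqlType.UNKNOWN branch covers a parameter placed directly in the select list, whose column type cannot be inferred from the query text, so the merge table stores it as a string. A small, hypothetical JDBC illustration of such a query (the connection URL and PERSON table are assumptions; the string fallback happens on the reduce side as described in the comment above and is not demonstrated by this client code):

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

public class TopLevelParamSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical thin-driver URL and PERSON table.
        try (Connection conn = DriverManager.getConnection("jdbc:ignite:thin://127.0.0.1/");
             PreparedStatement stmt = conn.prepareStatement("SELECT ?, NAME FROM PERSON")) {
            // The type of the first column cannot be derived from the query itself,
            // which is why the reduce-side merge table falls back to a STRING column for it.
            stmt.setObject(1, 42);

            try (ResultSet rs = stmt.executeQuery()) {
                while (rs.next())
                    System.out.println(rs.getString(1) + " " + rs.getString(2));
            }
        }
    }
}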
use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
the class GridMapQueryExecutor method onQueryRequest0.
/**
* @param node Node that authored the request.
* @param qryId Query ID.
* @param reqId Request ID.
* @param segmentId Index segment ID.
* @param schemaName Schema name.
* @param qrys Queries to execute.
* @param cacheIds Caches which will be affected by these queries.
* @param topVer Topology version.
* @param partsMap Partitions map for unstable topology.
* @param parts Explicit partitions for current node.
* @param pageSize Page size.
* @param distributedJoins Query distributed join mode.
* @param enforceJoinOrder Enforce join order H2 flag.
* @param replicated Replicated only flag.
* @param timeout Query timeout.
* @param params Query parameters.
* @param lazy Lazy (streaming) execution flag.
* @param mvccSnapshot MVCC snapshot.
* @param dataPageScanEnabled If data page scan is enabled.
* @param treatReplicatedAsPartitioned If {@code true}, replicated caches are treated as partitioned.
*/
private void onQueryRequest0(final ClusterNode node, final long qryId, final long reqId, final int segmentId,
    final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
    final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts, final int pageSize,
    final boolean distributedJoins, final boolean enforceJoinOrder, final boolean replicated, final int timeout,
    final Object[] params, boolean lazy, @Nullable final MvccSnapshot mvccSnapshot, Boolean dataPageScanEnabled,
    boolean treatReplicatedAsPartitioned) {
boolean performanceStatsEnabled = ctx.performanceStatistics().enabled();
if (performanceStatsEnabled)
IoStatisticsQueryHelper.startGatheringQueryStatistics();
// Prepare to run queries.
GridCacheContext<?, ?> mainCctx = mainCacheContext(cacheIds);
MapNodeResults nodeRess = resultsForNode(node.id());
MapQueryResults qryResults = null;
PartitionReservation reserved = null;
QueryContext qctx = null;
// We don't use try with resources on purpose - the catch block must also be executed in the context of this span.
TraceSurroundings trace = MTC.support(ctx.tracing().create(SQL_QRY_EXEC_REQ, MTC.span())
    .addTag(SQL_QRY_TEXT, () -> qrys.stream().map(GridCacheSqlQuery::query).collect(Collectors.joining("; "))));
try {
if (topVer != null) {
// Reserve primary for topology version or explicit partitions.
reserved = h2.partitionReservationManager().reservePartitions(cacheIds, topVer, parts, node.id(), reqId);
if (reserved.failed()) {
sendRetry(node, reqId, segmentId, reserved.error());
return;
}
}
// Prepare query context.
DistributedJoinContext distributedJoinCtx = null;
if (distributedJoins && !replicated) {
distributedJoinCtx = new DistributedJoinContext(topVer, partsMap, node.id(), reqId, segmentId, pageSize);
}
qctx = new QueryContext(segmentId, h2.backupFilter(topVer, parts, treatReplicatedAsPartitioned), distributedJoinCtx, mvccSnapshot, reserved, true);
qryResults = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, lazy, qctx);
// qctx is set, we have to release reservations inside of it.
reserved = null;
if (distributedJoinCtx != null)
qryCtxRegistry.setShared(node.id(), reqId, qctx);
if (nodeRess.put(reqId, segmentId, qryResults) != null)
throw new IllegalStateException();
if (nodeRess.cancelled(reqId)) {
qryCtxRegistry.clearShared(node.id(), reqId);
nodeRess.cancelRequest(reqId);
throw new QueryCancelledException();
}
// Run queries.
int qryIdx = 0;
boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
for (GridCacheSqlQuery qry : qrys) {
H2PooledConnection conn = h2.connections().connection(schemaName);
H2Utils.setupConnection(conn, qctx, distributedJoins, enforceJoinOrder, lazy);
MapQueryResult res = new MapQueryResult(h2, mainCctx, node.id(), qry, params, conn, log);
qryResults.addResult(qryIdx, res);
try {
res.lock();
// Ensure we are on the target node for this replicated query.
if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
String sql = qry.query();
Collection<Object> params0 = F.asList(qry.parameters(params));
PreparedStatement stmt = conn.prepareStatement(sql, H2StatementCache.queryFlags(distributedJoins, enforceJoinOrder));
H2Utils.bindParameters(stmt, params0);
MapH2QueryInfo qryInfo = new MapH2QueryInfo(stmt, qry.query(), node.id(), qryId, reqId, segmentId);
ResultSet rs = h2.executeSqlQueryWithTimer(stmt, conn, sql, timeout, qryResults.queryCancel(qryIdx), dataPageScanEnabled, qryInfo);
if (evt) {
ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED,
    CacheQueryType.SQL.name(), mainCctx.name(), null, qry.query(), null, null, params, node.id(), null));
}
assert rs instanceof JdbcResultSet : rs.getClass();
if (qryResults.cancelled()) {
rs.close();
throw new QueryCancelledException();
}
res.openResult(rs, qryInfo);
final GridQueryNextPageResponse msg = prepareNextPage(nodeRess, node, qryResults, qryIdx, segmentId, pageSize, dataPageScanEnabled);
if (msg != null)
sendNextPage(node, msg);
} else {
assert !qry.isPartitioned();
qryResults.closeResult(qryIdx);
}
qryIdx++;
} finally {
try {
res.unlockTables();
} finally {
res.unlock();
}
}
}
if (!lazy)
qryResults.releaseQueryContext();
} catch (Throwable e) {
if (qryResults != null) {
nodeRess.remove(reqId, segmentId, qryResults);
qryResults.close();
// If a query is cancelled before execution is started, partitions have to be released.
if (!lazy || !qryResults.isAllClosed())
qryResults.releaseQueryContext();
} else
releaseReservations(qctx);
if (e instanceof QueryCancelledException)
sendError(node, reqId, e);
else {
SQLException sqlEx = X.cause(e, SQLException.class);
if (sqlEx != null && sqlEx.getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED)
sendQueryCancel(node, reqId);
else {
GridH2RetryException retryErr = X.cause(e, GridH2RetryException.class);
if (retryErr != null) {
final String retryCause = String.format("Failed to execute non-collocated query (will retry) [localNodeId=%s, rmtNodeId=%s, reqId=%s, " + "errMsg=%s]", ctx.localNodeId(), node.id(), reqId, retryErr.getMessage());
sendRetry(node, reqId, segmentId, retryCause);
} else {
QueryRetryException qryRetryErr = X.cause(e, QueryRetryException.class);
if (qryRetryErr != null)
sendError(node, reqId, qryRetryErr);
else {
if (e instanceof Error) {
U.error(log, "Failed to execute local query.", e);
throw (Error) e;
}
U.warn(log, "Failed to execute local query.", e);
sendError(node, reqId, e);
}
}
}
}
} finally {
if (reserved != null)
reserved.release();
if (trace != null)
trace.close();
if (performanceStatsEnabled) {
IoStatisticsHolder stat = IoStatisticsQueryHelper.finishGatheringQueryStatistics();
if (stat.logicalReads() > 0 || stat.physicalReads() > 0) {
ctx.performanceStatistics().queryReads(GridCacheQueryType.SQL_FIELDS, node.id(), reqId, stat.logicalReads(), stat.physicalReads());
}
}
}
}
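The catch block above classifies failures by walking the exception's cause chain (X.cause(e, SQLException.class), X.cause(e, GridH2RetryException.class), and so on). A minimal, self-contained sketch of that cause-unwrapping technique using only the JDK; Ignite's X utility is not reproduced here, and 57014 is assumed to be the value of H2's ErrorCode.STATEMENT_WAS_CANCELED:

import java.sql.SQLException;

/** Sketch of the cause-chain inspection used in the catch block above. */
public class CauseUnwrapSketch {
    /** Returns the first throwable of the given type in the cause chain, or null if none. */
    static <T extends Throwable> T cause(Throwable e, Class<T> cls) {
        for (Throwable t = e; t != null; t = t.getCause()) {
            if (cls.isInstance(t))
                return cls.cast(t);
        }
        return null;
    }

    public static void main(String[] args) {
        Throwable e = new RuntimeException(new SQLException("cancelled", "57014", 57014));

        SQLException sqlEx = cause(e, SQLException.class);

        if (sqlEx != null && sqlEx.getErrorCode() == 57014) // assumed H2 STATEMENT_WAS_CANCELED code
            System.out.println("acknowledge cancellation (sendQueryCancel)");
        else
            System.out.println("reply with error (sendError)");
    }
}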
use of org.apache.ignite.internal.processors.query.h2.sql.GridSqlOperationType.IN in project ignite by apache.
the class IgniteStatisticsManagerImpl method processObsolescence.
/**
* Save dirty obsolescence info to the local metastore. Check whether statistics need to be refreshed and schedule it.
*
* 1) Get all dirty partition statistics.
* 2) Make a separate task for each key to avoid saving obsolescence info for a removed partition (race).
* 3) Check whether a partition should be recollected and add it to the list in its table's task.
* 4) Submit the tasks. The obsolescence info is actually stored during task processing.
*/
public synchronized void processObsolescence() {
StatisticsUsageState usageState = usageState();
if (usageState != ON || ctx.isStopping()) {
if (log.isDebugEnabled())
log.debug("Skipping obsolescence processing.");
return;
}
if (log.isTraceEnabled())
log.trace("Process statistics obsolescence started.");
List<StatisticsKey> keys = statsRepos.getObsolescenceKeys();
if (F.isEmpty(keys)) {
if (log.isTraceEnabled())
log.trace("No obsolescence info found. Finish obsolescence processing.");
return;
} else {
if (log.isTraceEnabled())
log.trace(String.format("Scheduling obsolescence savings for %d targets", keys.size()));
}
for (StatisticsKey key : keys) {
StatisticsObjectConfiguration cfg = null;
try {
cfg = statCfgMgr.config(key);
} catch (IgniteCheckedException e) {
// No-op.
}
Set<Integer> tasksParts = calculateObsolescencedPartitions(cfg, statsRepos.getObsolescence(key));
GridH2Table tbl = schemaMgr.dataTable(key.schema(), key.obj());
if (tbl == null) {
// Table could have been removed earlier but not processed yet, or something went wrong. Try to reschedule.
if (log.isDebugEnabled())
log.debug(String.format("Got obsolescence statistics for unknown table %s", key));
}
LocalStatisticsGatheringContext ctx = new LocalStatisticsGatheringContext(true, tbl, cfg, tasksParts, null);
statProc.updateLocalStatistics(ctx);
}
}
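Step 2 of the Javadoc creates a separate task per statistics key so that obsolescence info for a concurrently removed partition is never saved. A minimal, self-contained sketch of that one-task-per-key pattern (the key type, executor, and the printed "save" action are illustrative, not the Ignite implementation):

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/** Sketch of submitting one obsolescence-saving task per statistics key. */
public class ObsolescenceTasksSketch {
    public static void main(String[] args) {
        // Hypothetical dirty state: statistics key -> dirty partition ids.
        Map<String, Set<Integer>> dirty = Map.of(
            "PUBLIC.PERSON", Set.of(0, 5, 7),
            "PUBLIC.ORDERS", Set.of(3));

        ExecutorService pool = Executors.newFixedThreadPool(2);

        // One task per key: if a key's partitions are removed concurrently, only that task
        // skips its save; other keys are unaffected (the race mentioned in the Javadoc).
        for (Map.Entry<String, Set<Integer>> e : dirty.entrySet())
            pool.submit(() -> System.out.println("Save obsolescence info for " + e.getKey() + ", parts " + e.getValue()));

        pool.shutdown();
    }
}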