use of org.apache.ignite.cache.query.QueryRetryException in project ignite by apache.
the class GridMapQueryExecutor method onQueryRequest0.
/**
* @param node Node that authored the request.
* @param qryId Query ID.
* @param reqId Request ID.
* @param segmentId Index segment ID.
* @param schemaName Schema name.
* @param qrys Queries to execute.
* @param cacheIds Caches which will be affected by these queries.
* @param topVer Topology version.
* @param partsMap Partitions map for unstable topology.
* @param parts Explicit partitions for current node.
* @param pageSize Page size.
* @param distributedJoins Query distributed join mode.
* @param enforceJoinOrder Enforce join order H2 flag.
* @param replicated Replicated only flag.
* @param timeout Query timeout.
* @param params Query parameters.
* @param lazy Lazy execution flag.
* @param mvccSnapshot MVCC snapshot.
* @param dataPageScanEnabled If data page scan is enabled.
* @param treatReplicatedAsPartitioned Treat replicated caches as partitioned flag.
*/
private void onQueryRequest0(final ClusterNode node, final long qryId, final long reqId, final int segmentId,
final String schemaName, final Collection<GridCacheSqlQuery> qrys, final List<Integer> cacheIds,
final AffinityTopologyVersion topVer, final Map<UUID, int[]> partsMap, final int[] parts, final int pageSize,
final boolean distributedJoins, final boolean enforceJoinOrder, final boolean replicated, final int timeout,
final Object[] params, boolean lazy, @Nullable final MvccSnapshot mvccSnapshot, Boolean dataPageScanEnabled,
boolean treatReplicatedAsPartitioned) {
boolean performanceStatsEnabled = ctx.performanceStatistics().enabled();
if (performanceStatsEnabled)
IoStatisticsQueryHelper.startGatheringQueryStatistics();
// Prepare to run queries.
GridCacheContext<?, ?> mainCctx = mainCacheContext(cacheIds);
MapNodeResults nodeRess = resultsForNode(node.id());
MapQueryResults qryResults = null;
PartitionReservation reserved = null;
QueryContext qctx = null;
// We don't use try with resources on purpose - the catch block must also be executed in the context of this span.
TraceSurroundings trace = MTC.support(ctx.tracing().create(SQL_QRY_EXEC_REQ, MTC.span()).addTag(SQL_QRY_TEXT, () -> qrys.stream().map(GridCacheSqlQuery::query).collect(Collectors.joining("; "))));
try {
if (topVer != null) {
// Reserve primary for topology version or explicit partitions.
reserved = h2.partitionReservationManager().reservePartitions(cacheIds, topVer, parts, node.id(), reqId);
if (reserved.failed()) {
sendRetry(node, reqId, segmentId, reserved.error());
return;
}
}
// Prepare query context.
DistributedJoinContext distributedJoinCtx = null;
if (distributedJoins && !replicated) {
distributedJoinCtx = new DistributedJoinContext(topVer, partsMap, node.id(), reqId, segmentId, pageSize);
}
qctx = new QueryContext(segmentId, h2.backupFilter(topVer, parts, treatReplicatedAsPartitioned), distributedJoinCtx, mvccSnapshot, reserved, true);
qryResults = new MapQueryResults(h2, reqId, qrys.size(), mainCctx, lazy, qctx);
// qctx is set, we have to release reservations inside of it.
reserved = null;
if (distributedJoinCtx != null)
qryCtxRegistry.setShared(node.id(), reqId, qctx);
if (nodeRess.put(reqId, segmentId, qryResults) != null)
throw new IllegalStateException();
if (nodeRess.cancelled(reqId)) {
qryCtxRegistry.clearShared(node.id(), reqId);
nodeRess.cancelRequest(reqId);
throw new QueryCancelledException();
}
// Run queries.
int qryIdx = 0;
boolean evt = mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);
for (GridCacheSqlQuery qry : qrys) {
H2PooledConnection conn = h2.connections().connection(schemaName);
H2Utils.setupConnection(conn, qctx, distributedJoins, enforceJoinOrder, lazy);
MapQueryResult res = new MapQueryResult(h2, mainCctx, node.id(), qry, params, conn, log);
qryResults.addResult(qryIdx, res);
try {
res.lock();
// Run the query only if it has no explicit target node or this node is its target (replicated queries are executed on a single node).
if (qry.node() == null || (segmentId == 0 && qry.node().equals(ctx.localNodeId()))) {
String sql = qry.query();
Collection<Object> params0 = F.asList(qry.parameters(params));
PreparedStatement stmt = conn.prepareStatement(sql, H2StatementCache.queryFlags(distributedJoins, enforceJoinOrder));
H2Utils.bindParameters(stmt, params0);
MapH2QueryInfo qryInfo = new MapH2QueryInfo(stmt, qry.query(), node.id(), qryId, reqId, segmentId);
ResultSet rs = h2.executeSqlQueryWithTimer(stmt, conn, sql, timeout, qryResults.queryCancel(qryIdx), dataPageScanEnabled, qryInfo);
if (evt) {
ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED, CacheQueryType.SQL.name(), mainCctx.name(), null, qry.query(), null, null, params, node.id(), null));
}
assert rs instanceof JdbcResultSet : rs.getClass();
if (qryResults.cancelled()) {
rs.close();
throw new QueryCancelledException();
}
res.openResult(rs, qryInfo);
final GridQueryNextPageResponse msg = prepareNextPage(nodeRess, node, qryResults, qryIdx, segmentId, pageSize, dataPageScanEnabled);
if (msg != null)
sendNextPage(node, msg);
} else {
assert !qry.isPartitioned();
qryResults.closeResult(qryIdx);
}
qryIdx++;
} finally {
try {
res.unlockTables();
} finally {
res.unlock();
}
}
}
if (!lazy)
qryResults.releaseQueryContext();
} catch (Throwable e) {
if (qryResults != null) {
nodeRess.remove(reqId, segmentId, qryResults);
qryResults.close();
// If a query is cancelled before execution starts, partitions have to be released.
if (!lazy || !qryResults.isAllClosed())
qryResults.releaseQueryContext();
} else
releaseReservations(qctx);
if (e instanceof QueryCancelledException)
sendError(node, reqId, e);
else {
SQLException sqlEx = X.cause(e, SQLException.class);
if (sqlEx != null && sqlEx.getErrorCode() == ErrorCode.STATEMENT_WAS_CANCELED)
sendQueryCancel(node, reqId);
else {
GridH2RetryException retryErr = X.cause(e, GridH2RetryException.class);
if (retryErr != null) {
final String retryCause = String.format("Failed to execute non-collocated query (will retry) [localNodeId=%s, rmtNodeId=%s, reqId=%s, " + "errMsg=%s]", ctx.localNodeId(), node.id(), reqId, retryErr.getMessage());
sendRetry(node, reqId, segmentId, retryCause);
} else {
QueryRetryException qryRetryErr = X.cause(e, QueryRetryException.class);
if (qryRetryErr != null)
sendError(node, reqId, qryRetryErr);
else {
if (e instanceof Error) {
U.error(log, "Failed to execute local query.", e);
throw (Error) e;
}
U.warn(log, "Failed to execute local query.", e);
sendError(node, reqId, e);
}
}
}
}
} finally {
if (reserved != null)
reserved.release();
if (trace != null)
trace.close();
if (performanceStatsEnabled) {
IoStatisticsHolder stat = IoStatisticsQueryHelper.finishGatheringQueryStatistics();
if (stat.logicalReads() > 0 || stat.physicalReads() > 0) {
ctx.performanceStatistics().queryReads(GridCacheQueryType.SQL_FIELDS, node.id(), reqId, stat.logicalReads(), stat.physicalReads());
}
}
}
}
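The retry handling above turns internal retry conditions (GridH2RetryException, QueryRetryException) into retry or error responses for the reduce node; from the application side this surfaces as a CacheException whose cause chain contains QueryRetryException, typically when a lazy query overlaps with concurrent DDL. The sketch below is a minimal client-side retry loop using only the public Ignite API; the class name, cache-name parameter and retry limit are illustrative assumptions, not part of the original code.

import java.util.List;
import javax.cache.CacheException;
import org.apache.ignite.Ignite;
import org.apache.ignite.cache.query.QueryRetryException;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class RetryOnSchemaChange {
    /** Runs a lazy SQL query against the given cache, retrying when the cause chain contains QueryRetryException. */
    public static List<List<?>> queryWithRetry(Ignite ignite, String cacheName, String sql, int maxRetries) {
        SqlFieldsQuery qry = new SqlFieldsQuery(sql).setLazy(true);

        for (int attempt = 0; ; attempt++) {
            try {
                // getAll() drains the cursor, so a concurrent schema change surfaces here.
                return ignite.cache(cacheName).query(qry).getAll();
            }
            catch (CacheException e) {
                boolean retryable = false;

                // Walk the cause chain looking for the retry marker.
                for (Throwable t = e; t != null; t = t.getCause()) {
                    if (t instanceof QueryRetryException) {
                        retryable = true;

                        break;
                    }
                }

                if (!retryable || attempt >= maxRetries)
                    throw e;
            }
        }
    }
}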
use of org.apache.ignite.cache.query.QueryRetryException in project ignite by apache.
the class GridH2Table method lock.
/**
* {@inheritDoc}
*/
@Override
public boolean lock(Session ses, boolean exclusive, boolean force) {
// In accordance with base method semantics, we'll return true if we were already exclusively locked.
SessionLock sesLock = sessions.get(ses);
if (sesLock != null) {
if (sesLock.isExclusive())
return true;
if (ver.get() != sesLock.version())
throw new QueryRetryException(getName());
return false;
}
// Acquire the lock.
lock(exclusive, true);
if (destroyed) {
unlock(exclusive);
throw new IllegalStateException("Table " + identifierString() + " already destroyed.");
}
// Mutate state.
sessions.put(ses, exclusive ? SessionLock.exclusiveLock() : SessionLock.sharedLock(ver.longValue()));
ses.addLock(this);
return false;
}
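The lock() override above is an optimistic version check: a shared session lock remembers the table version it observed when first taken, and a later re-lock against a newer version throws QueryRetryException so the query is restarted against the new schema. The standalone sketch below illustrates that pattern in simplified form; VersionedTableLock and its members are illustrative names, not the actual internal GridH2Table/SessionLock classes.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.ignite.cache.query.QueryRetryException;

/** Simplified illustration of the version check performed in GridH2Table.lock(). */
public class VersionedTableLock {
    /** Schema version, bumped on every DDL change (illustrative). */
    private final AtomicLong ver = new AtomicLong();

    /** Version observed by each session when it first locked the table. */
    private final Map<Object, Long> sessions = new ConcurrentHashMap<>();

    /** Records the current version on first lock; on re-lock, fails the query if the schema changed. */
    public void lock(Object ses, String tblName) {
        Long lockedVer = sessions.get(ses);

        if (lockedVer != null) {
            if (ver.get() != lockedVer)
                throw new QueryRetryException(tblName); // Query must be retried against the new schema.

            return;
        }

        sessions.put(ses, ver.get());
    }

    /** Called on DDL (e.g. CREATE/DROP INDEX) to invalidate open lazy cursors. */
    public void onSchemaChange() {
        ver.incrementAndGet();
    }

    /** Releases the session's lock. */
    public void unlock(Object ses) {
        sessions.remove(ses);
    }
}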
use of org.apache.ignite.cache.query.QueryRetryException in project ignite by apache.
the class AbstractQueryTableLockAndConnectionPoolSelfTest method checkTablesLockQueryAndDropTable.
/**
* @param node Ignite node to execute query.
* @throws Exception If failed.
*/
private void checkTablesLockQueryAndDropTable(final Ignite node) throws Exception {
execute(node, new SqlFieldsQuery("CREATE TABLE IF NOT EXISTS TEST (ID INT PRIMARY KEY, VAL INT)")).getAll();
final AtomicBoolean end = new AtomicBoolean(false);
final int qryThreads = 10;
// Do many concurrent queries.
IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
@Override
public void run() {
while (!end.get()) {
try {
FieldsQueryCursor<List<?>> cursor = execute(node, new SqlFieldsQuery("SELECT * FROM TEST").setLazy(lazy()));
cursor.getAll();
} catch (Exception e) {
String msg = e.getMessage();
if (msg != null && (msg.contains("Failed to find cache")
|| msg.contains("Failed to perform cache operation (cache is stopped)")
|| msg.contains("Failed to parse query. Table \"TEST\" not found")
|| msg.contains("Cache not found on local node (was concurrently destroyed?)")
|| msg.contains("Getting affinity for too old topology version that is already out of history")
|| msg.contains("Failed to find partitioned cache")
|| msg.contains("Table \"TEST\" not found")
|| msg.contains("Table not found")
|| msg.contains("Table PUBLIC.TEST already destroyed"))) {
// Swallow exception when table is dropped.
} else if (X.cause(e, IgniteInterruptedCheckedException.class) != null) {
// Swallow exception when table is dropped.
} else if (X.cause(e, NullPointerException.class) != null) {
// TODO: remove after https://issues.apache.org/jira/browse/IGNITE-15796
// Swallow exception when table is dropped.
} else if (X.cause(e, CacheException.class) != null) {
// Swallow exception when table is dropped.
} else if (X.cause(e, QueryRetryException.class) == null) {
log.error("Unexpected exception", e);
fail("Unexpected exception. " + e);
} else if (!lazy()) {
log.error("Unexpected exception", e);
fail("Unexpected QueryRetryException.");
}
}
}
}
}, qryThreads, "usr-qry");
long tEnd = U.currentTimeMillis() + TEST_DUR;
while (U.currentTimeMillis() < tEnd) {
execute(node, new SqlFieldsQuery("DROP TABLE TEST")).getAll();
// Small delay after the drop.
U.sleep(10);
execute(node, new SqlFieldsQuery("CREATE TABLE TEST (ID INT PRIMARY KEY, VAL INT)")).getAll();
}
// The test passes if DDL operations complete successfully under heavy concurrent query load.
end.set(true);
fut.get();
checkConnectionLeaks(Ignition.allGrids().size());
}
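The test relies on an execute(node, qry) helper defined elsewhere in AbstractQueryTableLockAndConnectionPoolSelfTest. A plausible shape for such a helper is sketched below; it is an assumption based on how Ignite tests commonly route SQL through the node's query processor (including the PUBLIC schema default), not the test's actual code.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.cache.query.FieldsQueryCursor;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.internal.IgniteEx;

/** Possible shape of the execute() helper used by the tests above (illustrative only). */
public class ExecuteHelper {
    /** Runs a SQL fields query through the node's query processor in the PUBLIC schema. */
    static FieldsQueryCursor<List<?>> execute(Ignite node, SqlFieldsQuery qry) {
        return ((IgniteEx)node).context().query().querySqlFields(qry.setSchema("PUBLIC"), false);
    }
}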
use of org.apache.ignite.cache.query.QueryRetryException in project ignite by apache.
the class AbstractQueryTableLockAndConnectionPoolSelfTest method checkBaseOperations.
/**
* Check base operations.
*
* @param node Node.
* @throws Exception If failed.
*/
private void checkBaseOperations(Ignite node) throws Exception {
checkQuerySplitToSeveralMapQueries(node);
// Get full data.
{
List<List<?>> rows = execute(node, baseQuery()).getAll();
assertBaseQueryResults(rows);
}
// Check that QueryRetryException is thrown when the schema changes while a lazy cursor is open.
{
List<List<?>> rows = new ArrayList<>();
FieldsQueryCursor<List<?>> cursor = execute(node, baseQuery().setPageSize(PAGE_SIZE_SMALL));
Iterator<List<?>> it = cursor.iterator();
for (int i = 0; i < 10; ++i) rows.add(it.next());
execute(node, new SqlFieldsQuery("CREATE INDEX \"pers\".PERSON_NAME ON \"pers\".Person (name asc)")).getAll();
execute(node, new SqlFieldsQuery("DROP INDEX \"pers\".PERSON_NAME")).getAll();
try {
while (it.hasNext()) rows.add(it.next());
if (lazy())
fail("Retry exception must be thrown");
} catch (Exception e) {
if (!lazy() || X.cause(e, QueryRetryException.class) == null) {
log.error("Invalid exception: ", e);
fail("QueryRetryException is expected");
}
}
}
// Get data in several pages.
{
List<List<?>> rows = execute(node, baseQuery().setPageSize(PAGE_SIZE_SMALL)).getAll();
assertBaseQueryResults(rows);
}
// Test full iteration.
{
List<List<?>> rows = new ArrayList<>();
FieldsQueryCursor<List<?>> cursor = execute(node, baseQuery().setPageSize(PAGE_SIZE_SMALL));
for (List<?> row : cursor) rows.add(row);
cursor.close();
assertBaseQueryResults(rows);
}
// Test partial iteration with cursor close.
try (FieldsQueryCursor<List<?>> partialCursor = execute(node, baseQuery().setPageSize(PAGE_SIZE_SMALL))) {
Iterator<List<?>> iter = partialCursor.iterator();
for (int i = 0; i < 30; i++) iter.next();
}
// Test execution of multiple queries at a time.
List<Iterator<List<?>>> iters = new ArrayList<>();
for (int i = 0; i < 200; i++) iters.add(execute(node, randomizedQuery().setPageSize(PAGE_SIZE_SMALL)).iterator());
while (!iters.isEmpty()) {
Iterator<Iterator<List<?>>> iterIter = iters.iterator();
while (iterIter.hasNext()) {
Iterator<List<?>> iter = iterIter.next();
int i = 0;
while (iter.hasNext() && i < 20) {
iter.next();
i++;
}
if (!iter.hasNext())
iterIter.remove();
}
}
checkConnectionLeaks(Ignition.allGrids().size());
checkHoldQuery(node);
checkShortQuery(node);
}
use of org.apache.ignite.cache.query.QueryRetryException in project ignite by apache.
the class AbstractQueryTableLockAndConnectionPoolSelfTest method checkTablesLockQueryAndDDLMultithreaded.
/**
* @param node Ignite node to execute query.
* @throws Exception If failed.
*/
private void checkTablesLockQueryAndDDLMultithreaded(final Ignite node) throws Exception {
final AtomicBoolean end = new AtomicBoolean(false);
final int qryThreads = 10;
// Do many concurrent queries.
IgniteInternalFuture<Long> fut = GridTestUtils.runMultiThreadedAsync(new Runnable() {
@Override
public void run() {
while (!end.get()) {
try {
FieldsQueryCursor<List<?>> cursor = execute(node, new SqlFieldsQueryEx("SELECT pers.id, pers.name " + "FROM (SELECT DISTINCT p.id, p.name " + "FROM \"pers\".PERSON as p) as pers " + "JOIN \"pers\".PERSON p on p.id = pers.id " + "JOIN (SELECT t.persId as persId, SUM(t.time) totalTime " + "FROM \"persTask\".PersonTask as t GROUP BY t.persId) as task ON task.persId = pers.id", true).setLazy(lazy()).setLocal(local).setPageSize(PAGE_SIZE_SMALL));
cursor.getAll();
} catch (Exception e) {
if (X.cause(e, QueryRetryException.class) == null) {
log.error("Unexpected exception", e);
fail("Unexpected exception. " + e);
} else if (!lazy()) {
log.error("Unexpected exception", e);
fail("Unexpected QueryRetryException.");
}
}
}
}
}, qryThreads, "usr-qry");
long tEnd = U.currentTimeMillis() + TEST_DUR;
while (U.currentTimeMillis() < tEnd) {
execute(node, new SqlFieldsQuery("CREATE INDEX \"pers\".PERSON_NAME ON \"pers\".Person (name asc)")).getAll();
execute(node, new SqlFieldsQuery("DROP INDEX \"pers\".PERSON_NAME")).getAll();
}
// The test passes if DDL operations complete successfully under heavy concurrent query load.
end.set(true);
fut.get();
checkConnectionLeaks(Ignition.allGrids().size());
}