Use of org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest in project ignite by apache.
The class GridReduceQueryExecutor, method update().
/**
* @param schemaName Schema name.
* @param cacheIds Cache ids.
* @param selectQry Select query.
* @param params SQL parameters.
* @param enforceJoinOrder Enforce join order of tables.
* @param pageSize Page size.
* @param timeoutMillis Timeout in milliseconds.
* @param parts Partitions.
* @param isReplicatedOnly Whether query uses only replicated caches.
* @param cancel Cancel state.
* @return Update result, or {@code null} when some map node doesn't support distributed DML.
*/
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
    // Resolve the current topology and the nodes/partitions that must participate in the update.
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();

    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

    final long reqId = qryIdGen.incrementAndGet();

    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS,
        schemaName, U.currentTimeMillis(), cancel, false);

    Collection<ClusterNode> nodes = nodesParts.nodes();

    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " +
            "Explanation (Retry update once topology recovers).");

    // For replicated-only queries a single node holds all the data; prefer the local node.
    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();

        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }

    // Distributed (server-side) DML is supported by map nodes starting with Ignite 2.3.0.
    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " +
                "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");

            return null;
        }
    }

    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);

    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;

    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;

    GridH2DmlRequest req = new GridH2DmlRequest().requestId(reqId).topologyVersion(topVer).caches(cacheIds)
        .schemaName(schemaName).query(selectQry).pageSize(pageSize).parameters(params).timeout(timeoutMillis)
        .flags(flags);

    // Register the run so map node responses can be matched to it.
    updRuns.put(reqId, r);

    boolean release = false;

    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ?
            nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();

        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null :
            new ExplicitPartitionsSpecializer(partsMap);

        final Collection<ClusterNode> finalNodes = nodes;

        // On cancel, fail the local future and ask the map nodes to cancel the query.
        cancel.set(new Runnable() {
            @Override public void run() {
                r.future().onCancelled();

                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });

        // send() logs the debug message.
        if (send(nodes, req, partsSpec, false))
            return r.future().get();

        throw new CacheException("Failed to send update request to participating nodes.");
    }
    catch (IgniteCheckedException | RuntimeException e) {
        release = true;

        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);

        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    }
    finally {
        // On failure, ask map nodes to cancel any work already started for this request.
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);

        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
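For orientation, here is a minimal, hypothetical usage sketch of the public SQL API whose DML statements this method ultimately serves. It is not part of the Ignite source above: the cache name, table, and columns are assumptions, and the cache is assumed to be configured with a matching SQL table. Whether the server-side path via GridH2DmlRequest is actually taken also depends on configuration and on every map node passing the 2.3.0 version check above.

import java.util.concurrent.TimeUnit;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;

public class DistributedDmlSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // Assumes a cache named "Person" configured with a Person(dept, salary) SQL table.
            IgniteCache<Integer, Object> cache = ignite.cache("Person");

            SqlFieldsQuery qry = new SqlFieldsQuery("UPDATE Person SET salary = salary * 1.1 WHERE dept = ?")
                .setArgs("engineering")
                .setPageSize(1024)
                .setTimeout(10_000, TimeUnit.MILLISECONDS);

            // For DML the cursor yields a single row whose first column is the update count.
            long updated = (Long)cache.query(qry).getAll().get(0).get(0);

            System.out.println("Updated rows: " + updated);
        }
    }
}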
Use of org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest in project ignite by apache.
The class GridMapQueryExecutor, method onMessage().
/**
* @param nodeId Node ID.
* @param msg Message.
*/
public void onMessage(UUID nodeId, Object msg) {
    try {
        assert msg != null;

        ClusterNode node = ctx.discovery().node(nodeId);

        if (node == null)
            // Node left, ignore.
            return;

        boolean processed = true;

        // Route the message to the matching handler; DML requests go to onDmlRequest().
        if (msg instanceof GridH2QueryRequest)
            onQueryRequest(node, (GridH2QueryRequest) msg);
        else if (msg instanceof GridQueryNextPageRequest)
            onNextPageRequest(node, (GridQueryNextPageRequest) msg);
        else if (msg instanceof GridQueryCancelRequest)
            onCancel(node, (GridQueryCancelRequest) msg);
        else if (msg instanceof GridH2DmlRequest)
            onDmlRequest(node, (GridH2DmlRequest) msg);
        else
            processed = false;

        if (processed && log.isDebugEnabled())
            log.debug("Processed request: " + nodeId + "->" + ctx.localNodeId() + " " + msg);
    }
    catch (Throwable th) {
        U.error(log, "Failed to process message: " + msg, th);
    }
}
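As a design note, the instanceof chain above can equivalently be expressed as a lookup in a handler map keyed by message class. The following standalone sketch is not Ignite code; the message classes and handler signature are placeholders that only illustrate the type-based dispatch pattern used by onMessage().

import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.function.BiConsumer;

public class DispatchSketch {
    /** Placeholder message types standing in for the Grid*Request classes. */
    static class QueryRequest { }
    static class DmlRequest { }

    private final Map<Class<?>, BiConsumer<UUID, Object>> handlers = new HashMap<>();

    DispatchSketch() {
        handlers.put(QueryRequest.class, (nodeId, msg) -> System.out.println("query from " + nodeId));
        handlers.put(DmlRequest.class, (nodeId, msg) -> System.out.println("dml from " + nodeId));
    }

    /** Routes a message to its handler; returns false for unknown types, mirroring 'processed'. */
    boolean onMessage(UUID nodeId, Object msg) {
        BiConsumer<UUID, Object> h = handlers.get(msg.getClass());

        if (h == null)
            return false;

        h.accept(nodeId, msg);

        return true;
    }

    public static void main(String[] args) {
        DispatchSketch d = new DispatchSketch();

        System.out.println(d.onMessage(UUID.randomUUID(), new DmlRequest()));  // true
        System.out.println(d.onMessage(UUID.randomUUID(), "unknown message")); // false
    }
}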
Use of org.apache.ignite.internal.processors.query.h2.twostep.msg.GridH2DmlRequest in project ignite by apache.
The class GridMapQueryExecutor, method onDmlRequest().
/**
* @param node Node.
* @param req DML request.
*/
private void onDmlRequest(final ClusterNode node, final GridH2DmlRequest req) throws IgniteCheckedException {
    int[] parts = req.queryPartitions();
    List<Integer> cacheIds = req.caches();
    long reqId = req.requestId();
    AffinityTopologyVersion topVer = req.topologyVersion();

    List<GridReservable> reserved = new ArrayList<>();

    // Reserve the target partitions so they are not rebalanced away while the update runs.
    if (!reservePartitions(cacheIds, topVer, parts, reserved)) {
        U.error(log, "Failed to reserve partitions for DML request. [localNodeId=" + ctx.localNodeId() +
            ", nodeId=" + node.id() + ", reqId=" + req.requestId() + ", cacheIds=" + cacheIds +
            ", topVer=" + topVer + ", parts=" + Arrays.toString(parts) + ']');

        sendUpdateResponse(node, reqId, null, "Failed to reserve partitions for DML request. " +
            "Explanation (Retry your request when re-balancing is over).");

        return;
    }

    MapNodeResults nodeResults = resultsForNode(node.id());

    try {
        IndexingQueryFilter filter = h2.backupFilter(topVer, parts);

        GridQueryCancel cancel = nodeResults.putUpdate(reqId);

        // Rebuild a local SqlFieldsQuery from the request payload.
        SqlFieldsQuery fldsQry = new SqlFieldsQuery(req.query());

        if (req.parameters() != null)
            fldsQry.setArgs(req.parameters());

        fldsQry.setEnforceJoinOrder(req.isFlagSet(GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER));
        fldsQry.setTimeout(req.timeout(), TimeUnit.MILLISECONDS);
        fldsQry.setPageSize(req.pageSize());
        fldsQry.setLocal(true);

        boolean local = true;

        final boolean replicated = req.isFlagSet(GridH2QueryRequest.FLAG_REPLICATED);

        // With query parallelism enabled on a partitioned cache the update cannot run as a purely local query.
        if (!replicated && !F.isEmpty(cacheIds) && findFirstPartitioned(cacheIds).config().getQueryParallelism() > 1) {
            fldsQry.setDistributedJoins(true);

            local = false;
        }

        UpdateResult updRes = h2.mapDistributedUpdate(req.schemaName(), fldsQry, filter, cancel, local);

        GridCacheContext<?, ?> mainCctx =
            !F.isEmpty(cacheIds) ? ctx.cache().context().cacheContext(cacheIds.get(0)) : null;

        boolean evt = local && mainCctx != null && mainCctx.events().isRecordable(EVT_CACHE_QUERY_EXECUTED);

        if (evt) {
            ctx.event().record(new CacheQueryExecutedEvent<>(node, "SQL query executed.", EVT_CACHE_QUERY_EXECUTED,
                CacheQueryType.SQL.name(), mainCctx.name(), null, req.query(), null, null, req.parameters(),
                node.id(), null));
        }

        sendUpdateResponse(node, reqId, updRes, null);
    }
    catch (Exception e) {
        U.error(log, "Error processing dml request. [localNodeId=" + ctx.localNodeId() +
            ", nodeId=" + node.id() + ", req=" + req + ']', e);

        sendUpdateResponse(node, reqId, null, e.getMessage());
    }
    finally {
        if (!F.isEmpty(reserved)) {
            // Release reserved partitions.
            for (int i = 0; i < reserved.size(); i++)
                reserved.get(i).release();
        }

        nodeResults.removeUpdate(reqId);
    }
}
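Both ends of the flag handling are now visible: GridReduceQueryExecutor.update() packs FLAG_ENFORCE_JOIN_ORDER and FLAG_REPLICATED into a single int, and onDmlRequest() reads them back with isFlagSet(). The standalone sketch below shows that bitmask round-trip in isolation; the constant values are illustrative assumptions, not the actual values of the Ignite internals.

public class FlagSketch {
    static final int FLAG_ENFORCE_JOIN_ORDER = 0x01; // assumed value, for illustration only
    static final int FLAG_REPLICATED = 0x02;         // assumed value, for illustration only

    /** Mirrors the shape of GridH2DmlRequest.isFlagSet(): true when every bit of 'flag' is set. */
    static boolean isFlagSet(int flags, int flag) {
        return (flags & flag) == flag;
    }

    public static void main(String[] args) {
        // Reduce side: encode the query options into a single int, as in update().
        boolean enforceJoinOrder = true;
        boolean isReplicatedOnly = false;

        int flags = enforceJoinOrder ? FLAG_ENFORCE_JOIN_ORDER : 0;

        if (isReplicatedOnly)
            flags |= FLAG_REPLICATED;

        // Map side: decode the options back when rebuilding the local query, as in onDmlRequest().
        System.out.println("enforceJoinOrder=" + isFlagSet(flags, FLAG_ENFORCE_JOIN_ORDER));
        System.out.println("replicated=" + isFlagSet(flags, FLAG_REPLICATED));
    }
}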