Use of org.apache.ignite.internal.processors.query.h2.UpdateResult in project ignite by apache.
Class DmlStatementsProcessor, method executeUpdateStatement.
/**
 * Actually perform SQL DML operation locally.
 *
 * @param schemaName Schema name.
 * @param cctx Cache context.
 * @param prepStmt Prepared statement for DML query.
 * @param fieldsQry Fields query.
 * @param loc Query locality flag.
 * @param filters Cache name and key filter.
 * @param cancel Query cancel state.
 * @param failedKeys Keys to restrict UPDATE and DELETE operations with. Null or empty array means no restriction.
 * @return Pair [number of successfully processed items; keys that have failed to be processed].
 * @throws IgniteCheckedException if failed.
 */
@SuppressWarnings({"ConstantConditions", "unchecked"})
private UpdateResult executeUpdateStatement(String schemaName, final GridCacheContext cctx,
    PreparedStatement prepStmt, SqlFieldsQuery fieldsQry, boolean loc, IndexingQueryFilter filters,
    GridQueryCancel cancel, Object[] failedKeys) throws IgniteCheckedException {
    int mainCacheId = CU.cacheId(cctx.name());

    Integer errKeysPos = null;

    UpdatePlan plan = getPlanForStatement(schemaName, prepStmt, errKeysPos);

    if (plan.fastUpdateArgs != null) {
        assert F.isEmpty(failedKeys) && errKeysPos == null;

        return doFastUpdate(plan, fieldsQry.getArgs());
    }

    assert !F.isEmpty(plan.selectQry);

    QueryCursorImpl<List<?>> cur;

    // Do a two-step query only if locality flag is not set AND if plan's SELECT corresponds to an actual
    // sub-query and not some dummy stuff like "select 1, 2, 3;".
    if (!loc && !plan.isLocSubqry) {
        SqlFieldsQuery newFieldsQry = new SqlFieldsQuery(plan.selectQry, fieldsQry.isCollocated())
            .setArgs(fieldsQry.getArgs())
            .setDistributedJoins(fieldsQry.isDistributedJoins())
            .setEnforceJoinOrder(fieldsQry.isEnforceJoinOrder())
            .setLocal(fieldsQry.isLocal())
            .setPageSize(fieldsQry.getPageSize())
            .setTimeout(fieldsQry.getTimeout(), TimeUnit.MILLISECONDS);

        cur = (QueryCursorImpl<List<?>>)idx.queryDistributedSqlFields(schemaName, newFieldsQry, true, cancel,
            mainCacheId);
    }
    else {
        final GridQueryFieldsResult res = idx.queryLocalSqlFields(schemaName, plan.selectQry,
            F.asList(fieldsQry.getArgs()), filters, fieldsQry.isEnforceJoinOrder(), fieldsQry.getTimeout(), cancel);

        cur = new QueryCursorImpl<>(new Iterable<List<?>>() {
            @Override public Iterator<List<?>> iterator() {
                try {
                    return new GridQueryCacheObjectsIterator(res.iterator(), idx.objectContext(), true);
                }
                catch (IgniteCheckedException e) {
                    throw new IgniteException(e);
                }
            }
        }, cancel);
    }

    int pageSize = loc ? 0 : fieldsQry.getPageSize();

    switch (plan.mode) {
        case MERGE:
            return new UpdateResult(doMerge(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY);

        case INSERT:
            return new UpdateResult(doInsert(plan, cur, pageSize), X.EMPTY_OBJECT_ARRAY);

        case UPDATE:
            return doUpdate(plan, cur, pageSize);

        case DELETE:
            return doDelete(cctx, cur, pageSize);

        default:
            throw new IgniteSQLException("Unexpected DML operation [mode=" + plan.mode + ']',
                IgniteQueryErrorCode.UNEXPECTED_OPERATION);
    }
}
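For context, the following minimal sketch shows how a DML statement reaches this code path from the public API: an UPDATE/INSERT/DELETE wrapped in a SqlFieldsQuery is submitted through IgniteCache.query(), and the single cell of the single result row is the update counter that UpdateResult carries internally. This is an illustrative sketch, not taken from the snippet above; it assumes an Ignite 2.x node where SQL DDL is available, and the cache and table names are made up.

import java.util.List;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.query.SqlFieldsQuery;
import org.apache.ignite.configuration.CacheConfiguration;

public class DmlUsageSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // "demo" is an arbitrary cache used only to issue SQL statements.
            IgniteCache<?, ?> cache = ignite.getOrCreateCache(new CacheConfiguration<>("demo"));

            // DDL and DML both go through SqlFieldsQuery.
            cache.query(new SqlFieldsQuery(
                "CREATE TABLE person(id LONG PRIMARY KEY, name VARCHAR)")).getAll();

            cache.query(new SqlFieldsQuery(
                "INSERT INTO person(id, name) VALUES (?, ?)").setArgs(1L, "Alice")).getAll();

            // The single row of the DML result carries the number of affected rows.
            List<List<?>> res = cache.query(new SqlFieldsQuery(
                "UPDATE person SET name = ? WHERE id = ?").setArgs("Bob", 1L)).getAll();

            System.out.println("Updated rows: " + res.get(0).get(0));
        }
    }
}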
Use of org.apache.ignite.internal.processors.query.h2.UpdateResult in project ignite by apache.
Class DmlStatementsProcessor, method updateSqlFields.
/**
* Execute DML statement, possibly with a few re-attempts in case of concurrent data modifications.
*
* @param schemaName Schema.
* @param stmt JDBC statement.
* @param fieldsQry Original query.
* @param loc Query locality flag.
* @param filters Cache name and key filter.
* @param cancel Cancel.
* @return Update result (modified items count and failed keys).
* @throws IgniteCheckedException if failed.
*/
private UpdateResult updateSqlFields(String schemaName, PreparedStatement stmt, SqlFieldsQuery fieldsQry,
    boolean loc, IndexingQueryFilter filters, GridQueryCancel cancel) throws IgniteCheckedException {
    Object[] errKeys = null;

    long items = 0;

    UpdatePlan plan = getPlanForStatement(schemaName, stmt, null);

    GridCacheContext<?, ?> cctx = plan.tbl.rowDescriptor().context();

    for (int i = 0; i < DFLT_DML_RERUN_ATTEMPTS; i++) {
        CacheOperationContext opCtx = cctx.operationContextPerCall();

        // Force keepBinary for operation context to avoid binary deserialization inside entry processor.
        if (cctx.binaryMarshaller()) {
            CacheOperationContext newOpCtx = null;

            if (opCtx == null)
                // Mimics behavior of GridCacheAdapter#keepBinary and GridCacheProxyImpl#keepBinary.
                newOpCtx = new CacheOperationContext(false, null, true, null, false, null, false);
            else if (!opCtx.isKeepBinary())
                newOpCtx = opCtx.keepBinary();

            if (newOpCtx != null)
                cctx.operationContextPerCall(newOpCtx);
        }

        UpdateResult r;

        try {
            r = executeUpdateStatement(schemaName, cctx, stmt, fieldsQry, loc, filters, cancel, errKeys);
        }
        finally {
            cctx.operationContextPerCall(opCtx);
        }

        items += r.cnt;
        errKeys = r.errKeys;

        if (F.isEmpty(errKeys))
            break;
    }

    if (F.isEmpty(errKeys)) {
        if (items == 1L)
            return UpdateResult.ONE;
        else if (items == 0L)
            return UpdateResult.ZERO;
    }

    return new UpdateResult(items, errKeys);
}
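The loop above accumulates partial results across attempts: each UpdateResult contributes its processed-row count, and only the keys it reports as failed are retried on the next pass. The self-contained sketch below models that pattern with simplified, hypothetical types (it does not use the Ignite classes) purely to make the control flow explicit.

import java.util.Arrays;
import java.util.function.Function;

public class RetryPatternSketch {
    /** Minimal stand-in for UpdateResult: processed count plus keys that failed. */
    static final class Result {
        final long cnt;
        final Object[] errKeys;

        Result(long cnt, Object[] errKeys) {
            this.cnt = cnt;
            this.errKeys = errKeys;
        }
    }

    static final int RERUN_ATTEMPTS = 4;

    static Result runWithRetries(Function<Object[], Result> attempt) {
        Object[] errKeys = null;
        long items = 0;

        for (int i = 0; i < RERUN_ATTEMPTS; i++) {
            Result r = attempt.apply(errKeys); // Restrict the re-run to previously failed keys.

            items += r.cnt;
            errKeys = r.errKeys;

            if (errKeys == null || errKeys.length == 0)
                break; // Everything processed, stop early.
        }

        return new Result(items, errKeys);
    }

    public static void main(String[] args) {
        // First attempt "fails" on key 42, the retry succeeds for it.
        Result total = runWithRetries(failed ->
            failed == null ? new Result(2, new Object[] {42}) : new Result(1, new Object[0]));

        System.out.println("items=" + total.cnt + ", failed=" + Arrays.toString(total.errKeys));
    }
}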
Use of org.apache.ignite.internal.processors.query.h2.UpdateResult in project ignite by apache.
Class GridReduceQueryExecutor, method update.
/**
* @param schemaName Schema name.
* @param cacheIds Cache ids.
* @param selectQry Select query.
* @param params SQL parameters.
* @param enforceJoinOrder Enforce join order of tables.
* @param pageSize Page size.
* @param timeoutMillis Timeout.
* @param parts Partitions.
* @param isReplicatedOnly Whether query uses only replicated caches.
* @param cancel Cancel state.
* @return Update result, or {@code null} when some map node doesn't support distributed DML.
*/
public UpdateResult update(String schemaName, List<Integer> cacheIds, String selectQry, Object[] params,
    boolean enforceJoinOrder, int pageSize, int timeoutMillis, final int[] parts, boolean isReplicatedOnly,
    GridQueryCancel cancel) {
    AffinityTopologyVersion topVer = h2.readyTopologyVersion();

    NodesForPartitionsResult nodesParts = nodesForPartitions(cacheIds, topVer, parts, isReplicatedOnly);

    final long reqId = qryIdGen.incrementAndGet();

    final GridRunningQueryInfo qryInfo = new GridRunningQueryInfo(reqId, selectQry, GridCacheQueryType.SQL_FIELDS,
        schemaName, U.currentTimeMillis(), cancel, false);

    Collection<ClusterNode> nodes = nodesParts.nodes();

    if (nodes == null)
        throw new CacheException("Failed to determine nodes participating in the update. " +
            "Explanation (Retry update once topology recovers).");

    if (isReplicatedOnly) {
        ClusterNode locNode = ctx.discovery().localNode();

        if (nodes.contains(locNode))
            nodes = singletonList(locNode);
        else
            nodes = singletonList(F.rand(nodes));
    }

    for (ClusterNode n : nodes) {
        if (!n.version().greaterThanEqual(2, 3, 0)) {
            log.warning("Server-side DML optimization is skipped because map node does not support it. " +
                "Falling back to normal DML. [node=" + n.id() + ", v=" + n.version() + "].");

            return null;
        }
    }

    final DistributedUpdateRun r = new DistributedUpdateRun(nodes.size(), qryInfo);

    int flags = enforceJoinOrder ? GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER : 0;

    if (isReplicatedOnly)
        flags |= GridH2QueryRequest.FLAG_REPLICATED;

    GridH2DmlRequest req = new GridH2DmlRequest()
        .requestId(reqId)
        .topologyVersion(topVer)
        .caches(cacheIds)
        .schemaName(schemaName)
        .query(selectQry)
        .pageSize(pageSize)
        .parameters(params)
        .timeout(timeoutMillis)
        .flags(flags);

    updRuns.put(reqId, r);

    boolean release = false;

    try {
        Map<ClusterNode, IntArray> partsMap = (nodesParts.queryPartitionsMap() != null) ?
            nodesParts.queryPartitionsMap() : nodesParts.partitionsMap();

        ExplicitPartitionsSpecializer partsSpec = (parts == null) ? null :
            new ExplicitPartitionsSpecializer(partsMap);

        final Collection<ClusterNode> finalNodes = nodes;

        cancel.set(new Runnable() {
            @Override public void run() {
                r.future().onCancelled();
                send(finalNodes, new GridQueryCancelRequest(reqId), null, false);
            }
        });

        // send() logs the debug message.
        if (send(nodes, req, partsSpec, false))
            return r.future().get();

        throw new CacheException("Failed to send update request to participating nodes.");
    }
    catch (IgniteCheckedException | RuntimeException e) {
        release = true;

        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", e);

        throw new CacheException("Failed to run update. " + e.getMessage(), e);
    }
    finally {
        if (release)
            send(nodes, new GridQueryCancelRequest(reqId), null, false);

        if (!updRuns.remove(reqId, r))
            U.warn(log, "Update run was already removed: " + reqId);
    }
}
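One detail worth noting is the version gate: the whole distributed-DML path is abandoned (by returning null) if any participating node is older than 2.3.0, and the caller falls back to conventional DML. The sketch below reproduces that guard in isolation with hypothetical stand-in types; it is illustrative only and does not use Ignite's ClusterNode or IgniteProductVersion.

import java.util.Arrays;
import java.util.List;

public class VersionGateSketch {
    /** Minimal stand-in for a cluster node with a semantic version. */
    static final class Node {
        final String id;
        final int major, minor, maintenance;

        Node(String id, int major, int minor, int maintenance) {
            this.id = id;
            this.major = major;
            this.minor = minor;
            this.maintenance = maintenance;
        }

        /** Same idea as a "version greater than or equal" check. */
        boolean atLeast(int maj, int min, int mnt) {
            if (major != maj)
                return major > maj;
            if (minor != min)
                return minor > min;
            return maintenance >= mnt;
        }
    }

    /** Returns true only if every node supports the optimized (server-side) DML path. */
    static boolean allSupportDistributedDml(List<Node> nodes) {
        for (Node n : nodes) {
            if (!n.atLeast(2, 3, 0)) {
                System.out.println("Falling back to normal DML, node too old: " + n.id);
                return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        List<Node> nodes = Arrays.asList(new Node("a", 2, 3, 0), new Node("b", 2, 1, 5));
        System.out.println(allSupportDistributedDml(nodes)); // -> false
    }
}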
Use of org.apache.ignite.internal.processors.query.h2.UpdateResult in project ignite by apache.
Class DistributedUpdateRun, method handleResponse.
/**
* Handle response from remote node.
*
* @param id Node id.
* @param msg Response message.
*/
void handleResponse(UUID id, GridH2DmlResponse msg) {
    synchronized (this) {
        if (!rspNodes.add(id))
            // Ignore duplicated messages.
            return;

        String err = msg.error();

        if (err != null) {
            fut.onDone(new IgniteCheckedException("Update failed. " + (F.isEmpty(err) ? "" : err) +
                "[reqId=" + msg.requestId() + ", node=" + id + "]."));

            return;
        }

        if (!F.isEmpty(msg.errorKeys())) {
            List<Object> errList = Arrays.asList(msg.errorKeys());

            if (errorKeys == null)
                errorKeys = new HashSet<>(errList);
            else
                errorKeys.addAll(errList);
        }

        updCntr += msg.updateCounter();

        if (rspNodes.size() == nodeCount)
            fut.onDone(new UpdateResult(updCntr, errorKeys == null ? null : errorKeys.toArray()));
    }
}
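handleResponse() is the reduce step of a distributed update: per-node counters are summed, failed keys are unioned, duplicate responses are ignored, and the run's future completes once every node has reported. The self-contained sketch below mirrors that logic with hypothetical types (a CompletableFuture in place of Ignite's future adapter) purely for illustration.

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.CompletableFuture;

public class ReduceSketch {
    /** Minimal stand-in for UpdateResult. */
    static final class Result {
        final long cnt;
        final Object[] errKeys;

        Result(long cnt, Object[] errKeys) {
            this.cnt = cnt;
            this.errKeys = errKeys;
        }
    }

    private final int nodeCnt;
    private final Set<UUID> rspNodes = new HashSet<>();
    private final Set<Object> errKeys = new HashSet<>();
    private long updCntr;

    final CompletableFuture<Result> fut = new CompletableFuture<>();

    ReduceSketch(int nodeCnt) {
        this.nodeCnt = nodeCnt;
    }

    synchronized void onResponse(UUID nodeId, long cnt, Object[] failedKeys, String err) {
        if (!rspNodes.add(nodeId))
            return; // Duplicate response from the same node: ignore.

        if (err != null) {
            fut.completeExceptionally(new RuntimeException("Update failed on node " + nodeId + ": " + err));
            return;
        }

        if (failedKeys != null)
            errKeys.addAll(Arrays.asList(failedKeys));

        updCntr += cnt;

        if (rspNodes.size() == nodeCnt)
            fut.complete(new Result(updCntr, errKeys.isEmpty() ? null : errKeys.toArray()));
    }

    public static void main(String[] args) throws Exception {
        ReduceSketch run = new ReduceSketch(2);

        run.onResponse(UUID.randomUUID(), 3, null, null);
        run.onResponse(UUID.randomUUID(), 2, new Object[] {7}, null);

        Result res = run.fut.get();
        System.out.println(res.cnt + " rows, failed keys present: " + (res.errKeys != null));
    }
}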
Use of org.apache.ignite.internal.processors.query.h2.UpdateResult in project ignite by apache.
Class GridMapQueryExecutor, method sendUpdateResponse.
/**
* Sends update response for DML request.
*
* @param node Node.
* @param reqId Request id.
* @param updResult Update result.
* @param error Error message.
*/
@SuppressWarnings("deprecation")
private void sendUpdateResponse(ClusterNode node, long reqId, UpdateResult updResult, String error) {
try {
GridH2DmlResponse rsp = new GridH2DmlResponse(reqId, updResult == null ? 0 : updResult.counter(), updResult == null ? null : updResult.errorKeys(), error);
if (log.isDebugEnabled())
log.debug("Sending: [localNodeId=" + ctx.localNodeId() + ", node=" + node.id() + ", msg=" + rsp + "]");
if (node.isLocal())
h2.reduceQueryExecutor().onMessage(ctx.localNodeId(), rsp);
else {
rsp.marshall(ctx.config().getMarshaller());
ctx.io().sendToGridTopic(node, GridTopic.TOPIC_QUERY, rsp, QUERY_POOL);
}
} catch (Exception e) {
U.error(log, "Failed to send message.", e);
}
}
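The method above follows a common dispatch pattern: a response addressed to the local node skips marshalling and the network entirely and is handed straight to the reduce-side handler, while a response for a remote node is marshalled and sent over the query topic. A minimal, hypothetical sketch of that split (not Ignite's messaging API) follows.

import java.nio.charset.StandardCharsets;
import java.util.UUID;
import java.util.function.BiConsumer;

public class ResponseDispatchSketch {
    /** Hypothetical network transport: sends raw bytes to a node. */
    interface Transport {
        void send(UUID nodeId, byte[] payload);
    }

    /**
     * Local target: hand the response object to the in-process handler, no marshalling.
     * Remote target: marshal first, then go through the transport.
     */
    static void dispatch(UUID locNodeId, UUID targetNodeId, Object response,
        BiConsumer<UUID, Object> localHandler, Transport transport) {
        if (locNodeId.equals(targetNodeId))
            localHandler.accept(locNodeId, response);
        else
            transport.send(targetNodeId, response.toString().getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) {
        UUID loc = UUID.randomUUID();

        dispatch(loc, loc, "rsp[reqId=1, updCnt=3]",
            (nodeId, rsp) -> System.out.println("Handled locally on " + nodeId + ": " + rsp),
            (nodeId, bytes) -> System.out.println("Sent " + bytes.length + " bytes to " + nodeId));
    }
}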