Use of org.apache.ignite.internal.processors.query.EnlistOperation in project ignite by apache.
In class GridNearTxEnlistFuture, method processBatchLocalBackupKeys:
/**
 * @param primaryId Primary node id.
 * @param rows Rows.
 * @param dhtVer Dht version assigned at primary node.
 * @param dhtFutId Dht future id assigned at primary node.
 */
private void processBatchLocalBackupKeys(UUID primaryId, List<Object> rows, GridCacheVersion dhtVer, IgniteUuid dhtFutId) {
    assert dhtVer != null;
    assert dhtFutId != null;

    EnlistOperation op = it.operation();

    assert op != EnlistOperation.LOCK;

    boolean keysOnly = op.isDeleteOrLock();

    final ArrayList<KeyCacheObject> keys = new ArrayList<>(rows.size());
    final ArrayList<Message> vals = keysOnly ? null : new ArrayList<>(rows.size());

    for (Object row : rows) {
        if (keysOnly)
            keys.add(cctx.toCacheKeyObject(row));
        else {
            keys.add(cctx.toCacheKeyObject(((IgniteBiTuple) row).getKey()));

            if (op.isInvoke())
                vals.add((Message) ((IgniteBiTuple) row).getValue());
            else
                vals.add(cctx.toCacheObject(((IgniteBiTuple) row).getValue()));
        }
    }

    try {
        GridDhtTxRemote dhtTx = cctx.tm().tx(dhtVer);

        if (dhtTx == null) {
            dhtTx = new GridDhtTxRemote(cctx.shared(),
                cctx.localNodeId(),
                dhtFutId,
                primaryId,
                lockVer,
                topVer,
                dhtVer,
                null,
                cctx.systemTx(),
                cctx.ioPolicy(),
                PESSIMISTIC,
                REPEATABLE_READ,
                false,
                tx.remainingTime(),
                -1,
                SecurityUtils.securitySubjectId(cctx),
                tx.taskNameHash(),
                false,
                null);

            dhtTx.mvccSnapshot(new MvccSnapshotWithoutTxs(mvccSnapshot.coordinatorVersion(),
                mvccSnapshot.counter(), MVCC_OP_COUNTER_NA, mvccSnapshot.cleanupVersion()));

            dhtTx = cctx.tm().onCreated(null, dhtTx);

            if (dhtTx == null || !cctx.tm().onStarted(dhtTx)) {
                throw new IgniteTxRollbackCheckedException("Failed to update backup " +
                    "(transaction has been completed): " + dhtVer);
            }
        }

        cctx.tm().txHandler().mvccEnlistBatch(dhtTx, cctx, it.operation(), keys, vals,
            mvccSnapshot.withoutActiveTransactions(), null, -1);
    }
    catch (IgniteCheckedException e) {
        onDone(e);

        return;
    }

    sendNextBatches(primaryId);
}
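The branching above fixes the row shape per operation: DELETE and LOCK batches carry bare keys, while the other operations carry key/value tuples, with TRANSFORM values already packed as Message instances (e.g. GridInvokeValue). A minimal sketch of that contract from the producer side, using a hypothetical buildRow helper that is not part of Ignite:

// Illustration only (hypothetical helper): the row shape expected by the loop above, per EnlistOperation.
static Object buildRow(EnlistOperation op, Object key, @Nullable Object valOrInvoke) {
    // DELETE/LOCK rows are bare keys; other operations use key/value tuples,
    // where TRANSFORM values are invoke descriptors (Message instances such as GridInvokeValue).
    return op.isDeleteOrLock() ? key : new IgniteBiTuple<>(key, valOrInvoke);
}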
Use of org.apache.ignite.internal.processors.query.EnlistOperation in project ignite by apache.
In class GridNearTxLocal, method mvccPutAllAsync0:
/**
 * Internal method for put and transform operations in MVCC mode.
 * Note: Only one of the {@code map} and {@code invokeMap} maps must be non-null.
 *
 * @param cacheCtx Context.
 * @param map Key-value map to store.
 * @param invokeMap Invoke map.
 * @param invokeArgs Optional arguments for EntryProcessor.
 * @param retval Return value flag.
 * @param filter Filter.
 * @return Operation future.
 */
private <K, V> IgniteInternalFuture mvccPutAllAsync0(
    final GridCacheContext cacheCtx,
    @Nullable Map<? extends K, ? extends V> map,
    @Nullable Map<? extends K, ? extends EntryProcessor<K, V, Object>> invokeMap,
    @Nullable final Object[] invokeArgs,
    final boolean retval,
    @Nullable final CacheEntryPredicate filter
) {
    try {
        MvccUtils.requestSnapshot(this);

        beforePut(cacheCtx, retval, true);
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture(e);
    }

    if (log.isDebugEnabled())
        log.debug("Called putAllAsync(...) [tx=" + this + ", map=" + map + ", retval=" + retval + "]");

    assert map != null || invokeMap != null;

    if (F.isEmpty(map) && F.isEmpty(invokeMap)) {
        if (implicit())
            try {
                commit();
            }
            catch (IgniteCheckedException e) {
                return new GridFinishedFuture<>(e);
            }

        return new GridFinishedFuture<>(new GridCacheReturn(true, false));
    }

    // Set transform flag for operation.
    boolean transform = invokeMap != null;

    try {
        Set<?> keys = map != null ? map.keySet() : invokeMap.keySet();

        final Map<KeyCacheObject, Object> enlisted = new LinkedHashMap<>(keys.size());

        for (Object key : keys) {
            if (isRollbackOnly())
                return new GridFinishedFuture<>(timedOut() ? timeoutException() : rollbackException());

            if (key == null) {
                rollback();

                throw new NullPointerException("Null key.");
            }

            Object val = map == null ? null : map.get(key);
            EntryProcessor entryProcessor = transform ? invokeMap.get(key) : null;

            if (val == null && entryProcessor == null) {
                setRollbackOnly();

                throw new NullPointerException("Null value.");
            }

            KeyCacheObject cacheKey = cacheCtx.toCacheKeyObject(key);

            if (transform)
                enlisted.put(cacheKey, new GridInvokeValue(entryProcessor, invokeArgs));
            else
                enlisted.put(cacheKey, val);
        }

        return updateAsync(cacheCtx, new UpdateSourceIterator<IgniteBiTuple<KeyCacheObject, Object>>() {
            private final Iterator<Map.Entry<KeyCacheObject, Object>> it = enlisted.entrySet().iterator();

            @Override public EnlistOperation operation() {
                return transform ? EnlistOperation.TRANSFORM : EnlistOperation.UPSERT;
            }

            @Override public boolean hasNextX() throws IgniteCheckedException {
                return it.hasNext();
            }

            @Override public IgniteBiTuple<KeyCacheObject, Object> nextX() throws IgniteCheckedException {
                Map.Entry<KeyCacheObject, Object> next = it.next();

                return new IgniteBiTuple<>(next.getKey(), next.getValue());
            }
        }, retval, filter, remainingTime(), true);
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture(e);
    }
    catch (RuntimeException e) {
        onException();

        throw e;
    }
}
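The iterator above enlists key/value tuples with UPSERT or TRANSFORM. For a delete, the rows would be bare keys enlisted with EnlistOperation.DELETE; a minimal sketch, assuming the same UpdateSourceIterator defaults relied on above and a hypothetical deleteIterator helper:

// Sketch only (not Ignite code): a keys-only source for DELETE enlistment.
private static UpdateSourceIterator<KeyCacheObject> deleteIterator(Collection<KeyCacheObject> keys) {
    return new UpdateSourceIterator<KeyCacheObject>() {
        private final Iterator<KeyCacheObject> it = keys.iterator();

        @Override public EnlistOperation operation() {
            // DELETE rows are bare keys, so no IgniteBiTuple wrapping is needed.
            return EnlistOperation.DELETE;
        }

        @Override public boolean hasNextX() throws IgniteCheckedException {
            return it.hasNext();
        }

        @Override public KeyCacheObject nextX() throws IgniteCheckedException {
            return it.next();
        }
    };
}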
Use of org.apache.ignite.internal.processors.query.EnlistOperation in project ignite by apache.
In class IgniteH2Indexing, method executeUpdateTransactional:
/**
 * Execute update in transactional mode.
 *
 * @param qryId Query id.
 * @param qryDesc Query descriptor.
 * @param qryParams Query parameters.
 * @param dml Plan.
 * @param loc Local flag.
 * @param cancel Cancel hook.
 * @return Update result.
 * @throws IgniteCheckedException If failed.
 */
private UpdateResult executeUpdateTransactional(
    long qryId,
    QueryDescriptor qryDesc,
    QueryParameters qryParams,
    QueryParserResultDml dml,
    boolean loc,
    GridQueryCancel cancel
) throws IgniteCheckedException {
    UpdatePlan plan = dml.plan();

    GridCacheContext cctx = plan.cacheContext();

    assert cctx != null;
    assert cctx.transactional();

    GridNearTxLocal tx = tx(ctx);

    boolean implicit = (tx == null);
    boolean commit = implicit && qryParams.autoCommit();

    if (implicit)
        tx = txStart(cctx, qryParams.timeout());

    requestSnapshot(tx);

    try (GridNearTxLocal toCommit = commit ? tx : null) {
        DmlDistributedPlanInfo distributedPlan = loc ? null : plan.distributedPlan();

        long timeout = implicit ? tx.remainingTime() : operationTimeout(qryParams.timeout(), tx);

        if (cctx.isReplicated() || distributedPlan == null ||
            ((plan.mode() == UpdateMode.INSERT || plan.mode() == UpdateMode.MERGE) && !plan.isLocalSubquery())) {
            boolean sequential = true;

            UpdateSourceIterator<?> it;

            if (plan.fastResult()) {
                IgniteBiTuple row = plan.getFastRow(qryParams.arguments());

                assert row != null;

                EnlistOperation op = UpdatePlan.enlistOperation(plan.mode());

                it = new DmlUpdateSingleEntryIterator<>(op, op.isDeleteOrLock() ? row.getKey() : row);
            }
            else if (plan.hasRows()) {
                it = new DmlUpdateResultsIterator(UpdatePlan.enlistOperation(plan.mode()), plan,
                    plan.createRows(qryParams.arguments()));
            }
            else {
                SqlFieldsQuery selectFieldsQry = new SqlFieldsQuery(plan.selectQuery(), qryDesc.collocated())
                    .setArgs(qryParams.arguments())
                    .setDistributedJoins(qryDesc.distributedJoins())
                    .setEnforceJoinOrder(qryDesc.enforceJoinOrder())
                    .setLocal(qryDesc.local())
                    .setPageSize(qryParams.pageSize())
                    .setTimeout((int) timeout, TimeUnit.MILLISECONDS)
                    .setLazy(qryParams.lazy());

                FieldsQueryCursor<List<?>> cur = executeSelectForDml(qryId, qryDesc.schemaName(), selectFieldsQry,
                    MvccUtils.mvccTracker(cctx, tx), cancel, (int) timeout);

                it = plan.iteratorForTransaction(connMgr, cur);
            }

            // TODO: IGNITE-11176 - Need to support cancellation
            IgniteInternalFuture<Long> fut = tx.updateAsync(cctx, it, qryParams.pageSize(), timeout, sequential);

            UpdateResult res = new UpdateResult(fut.get(), X.EMPTY_OBJECT_ARRAY,
                plan.distributedPlan() != null ? plan.distributedPlan().derivedPartitions() : null);

            if (commit)
                toCommit.commit();

            return res;
        }

        int[] ids = U.toIntArray(distributedPlan.getCacheIds());

        int flags = 0;

        if (qryDesc.enforceJoinOrder())
            flags |= GridH2QueryRequest.FLAG_ENFORCE_JOIN_ORDER;

        if (distributedPlan.isReplicatedOnly())
            flags |= GridH2QueryRequest.FLAG_REPLICATED;

        if (qryParams.lazy())
            flags |= GridH2QueryRequest.FLAG_LAZY;

        flags = GridH2QueryRequest.setDataPageScanEnabled(flags, qryParams.dataPageScanEnabled());

        int[] parts = PartitionResult.calculatePartitions(qryParams.partitions(),
            distributedPlan.derivedPartitions(), qryParams.arguments());

        if (parts != null && parts.length == 0)
            return new UpdateResult(0, X.EMPTY_OBJECT_ARRAY, distributedPlan.derivedPartitions());
        else {
            // TODO: IGNITE-11176 - Need to support cancellation
            IgniteInternalFuture<Long> fut = tx.updateAsync(cctx, ids, parts, qryDesc.schemaName(), qryDesc.sql(),
                qryParams.arguments(), flags, qryParams.pageSize(), timeout);

            UpdateResult res = new UpdateResult(fut.get(), X.EMPTY_OBJECT_ARRAY, distributedPlan.derivedPartitions());

            if (commit)
                toCommit.commit();

            return res;
        }
    }
    catch (ClusterTopologyServerNotFoundException e) {
        throw new CacheServerNotFoundException(e.getMessage(), e);
    }
    catch (IgniteCheckedException e) {
        IgniteSQLException sqlEx = X.cause(e, IgniteSQLException.class);

        if (sqlEx != null)
            throw sqlEx;

        Exception ex = IgniteUtils.convertExceptionNoWrap(e);

        if (ex instanceof IgniteException)
            throw (IgniteException) ex;

        U.error(log, "Error during update [localNodeId=" + ctx.localNodeId() + "]", ex);

        throw new IgniteSQLException("Failed to run update. " + ex.getMessage(), ex);
    }
    finally {
        if (commit)
            cctx.tm().resetContext();
    }
}
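Here the plan mode selects the EnlistOperation via UpdatePlan.enlistOperation(plan.mode()). The mapping presumably looks roughly like the sketch below (an assumption for illustration only; the authoritative table lives in UpdatePlan):

// Assumed UpdateMode -> EnlistOperation mapping, for illustration only.
static EnlistOperation toEnlistOperation(UpdateMode mode) {
    switch (mode) {
        case INSERT: return EnlistOperation.INSERT;
        case MERGE:  return EnlistOperation.UPSERT;
        case UPDATE: return EnlistOperation.UPDATE;
        case DELETE: return EnlistOperation.DELETE;
        default: throw new IllegalArgumentException("Unexpected update mode: " + mode);
    }
}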