Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridInvokeValue in project ignite by apache.
The class GridNearTxEnlistRequest, method prepareMarshal.
/** {@inheritDoc} */
@Override public void prepareMarshal(GridCacheSharedContext ctx) throws IgniteCheckedException {
    super.prepareMarshal(ctx);

    GridCacheContext cctx = ctx.cacheContext(cacheId);
    CacheObjectContext objCtx = cctx.cacheObjectContext();

    if (rows != null && keys == null) {
        if (!addDepInfo && ctx.deploymentEnabled())
            addDepInfo = true;

        keys = new KeyCacheObject[rows.size()];

        int i = 0;

        boolean keysOnly = op.isDeleteOrLock();

        values = keysOnly ? null : new Message[keys.length];

        for (Object row : rows) {
            Object key, val = null;

            if (keysOnly)
                key = row;
            else {
                key = ((IgniteBiTuple)row).getKey();
                val = ((IgniteBiTuple)row).getValue();
            }

            assert key != null && (keysOnly || val != null) : "key=" + key + ", val=" + val;

            KeyCacheObject key0 = cctx.toCacheKeyObject(key);

            assert key0 != null;

            key0.prepareMarshal(objCtx);

            keys[i] = key0;

            if (!keysOnly) {
                if (op.isInvoke()) {
                    GridInvokeValue val0 = (GridInvokeValue)val;

                    prepareInvokeValue(cctx, val0);

                    values[i] = val0;
                }
                else {
                    if (addDepInfo)
                        prepareObject(val, cctx);

                    CacheObject val0 = cctx.toCacheObject(val);

                    assert val0 != null;

                    val0.prepareMarshal(objCtx);

                    values[i] = val0;
                }
            }

            i++;
        }
    }

    if (filter != null)
        filter.prepareMarshal(cctx);
}
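
For invoke operations, this request ships each user EntryProcessor together with its arguments wrapped in a GridInvokeValue message, which is what prepareInvokeValue marshals above. The following is a minimal sketch of the public-API call whose processor and arguments would travel inside such a GridInvokeValue; the cache name ("counters"), the key, and the AddProcessor class are illustrative assumptions and not part of the Ignite code above.

import javax.cache.processor.EntryProcessor;
import javax.cache.processor.MutableEntry;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;

public class InvokeExample {
    // Adds a delta to the current value; this processor and its argument are the
    // kind of payload GridInvokeValue.entryProcessor()/invokeArgs() carry on the wire.
    static class AddProcessor implements EntryProcessor<Integer, Long, Long> {
        @Override public Long process(MutableEntry<Integer, Long> entry, Object... args) {
            long delta = (Long)args[0];
            long newVal = (entry.getValue() == null ? 0L : entry.getValue()) + delta;

            entry.setValue(newVal);

            return newVal;
        }
    }

    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            IgniteCache<Integer, Long> cache = ignite.getOrCreateCache("counters"); // Name is illustrative.

            Long res = cache.invoke(1, new AddProcessor(), 5L);

            System.out.println("New value: " + res);
        }
    }
}
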
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridInvokeValue in project ignite by apache.
The class GridNearTxLocal, method mvccPutAllAsync0.
/**
 * Internal method for put and transform operations in Mvcc mode.
 * Note: Only one of {@code map} and {@code invokeMap} must be non-null.
 *
 * @param cacheCtx Cache context.
 * @param map Key-value map to store.
 * @param invokeMap Map of entry processors to invoke.
 * @param invokeArgs Optional arguments for the entry processors.
 * @param retval Flag indicating whether a value should be returned.
 * @param filter Filter.
 * @return Operation future.
 */
private <K, V> IgniteInternalFuture mvccPutAllAsync0(
    final GridCacheContext cacheCtx,
    @Nullable Map<? extends K, ? extends V> map,
    @Nullable Map<? extends K, ? extends EntryProcessor<K, V, Object>> invokeMap,
    @Nullable final Object[] invokeArgs,
    final boolean retval,
    @Nullable final CacheEntryPredicate filter
) {
    try {
        MvccUtils.requestSnapshot(this);

        beforePut(cacheCtx, retval, true);
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture(e);
    }

    if (log.isDebugEnabled())
        log.debug("Called putAllAsync(...) [tx=" + this + ", map=" + map + ", retval=" + retval + "]");

    assert map != null || invokeMap != null;

    if (F.isEmpty(map) && F.isEmpty(invokeMap)) {
        if (implicit()) {
            try {
                commit();
            }
            catch (IgniteCheckedException e) {
                return new GridFinishedFuture<>(e);
            }
        }

        return new GridFinishedFuture<>(new GridCacheReturn(true, false));
    }

    // Set transform flag for operation.
    boolean transform = invokeMap != null;

    try {
        Set<?> keys = map != null ? map.keySet() : invokeMap.keySet();

        final Map<KeyCacheObject, Object> enlisted = new LinkedHashMap<>(keys.size());

        for (Object key : keys) {
            if (isRollbackOnly())
                return new GridFinishedFuture<>(timedOut() ? timeoutException() : rollbackException());

            if (key == null) {
                rollback();

                throw new NullPointerException("Null key.");
            }

            Object val = map == null ? null : map.get(key);
            EntryProcessor entryProcessor = transform ? invokeMap.get(key) : null;

            if (val == null && entryProcessor == null) {
                setRollbackOnly();

                throw new NullPointerException("Null value.");
            }

            KeyCacheObject cacheKey = cacheCtx.toCacheKeyObject(key);

            if (transform)
                enlisted.put(cacheKey, new GridInvokeValue(entryProcessor, invokeArgs));
            else
                enlisted.put(cacheKey, val);
        }

        return updateAsync(cacheCtx, new UpdateSourceIterator<IgniteBiTuple<KeyCacheObject, Object>>() {
            private final Iterator<Map.Entry<KeyCacheObject, Object>> it = enlisted.entrySet().iterator();

            @Override public EnlistOperation operation() {
                return transform ? EnlistOperation.TRANSFORM : EnlistOperation.UPSERT;
            }

            @Override public boolean hasNextX() throws IgniteCheckedException {
                return it.hasNext();
            }

            @Override public IgniteBiTuple<KeyCacheObject, Object> nextX() throws IgniteCheckedException {
                Map.Entry<KeyCacheObject, Object> next = it.next();

                return new IgniteBiTuple<>(next.getKey(), next.getValue());
            }
        }, retval, filter, remainingTime(), true);
    }
    catch (IgniteCheckedException e) {
        return new GridFinishedFuture(e);
    }
    catch (RuntimeException e) {
        onException();

        throw e;
    }
}
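
This method enlists each key into the MVCC transaction, wrapping entry processors in GridInvokeValue (TRANSFORM) or passing plain values (UPSERT) through an UpdateSourceIterator. The following is a minimal sketch of a user-level call that could reach this path, assuming an Ignite version that supports TRANSACTIONAL_SNAPSHOT (MVCC) caches; the cache name "mvccCache" and the sample data are illustrative assumptions.

import java.util.HashMap;
import java.util.Map;
import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;

public class MvccPutAllExample {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            // MVCC caches use the TRANSACTIONAL_SNAPSHOT atomicity mode.
            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("mvccCache"); // Name is illustrative.
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL_SNAPSHOT);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            Map<Integer, String> batch = new HashMap<>();
            batch.put(1, "one");
            batch.put(2, "two");

            // Explicit transaction: putAll() on an MVCC cache is enlisted through
            // the transaction's putAll path (such as mvccPutAllAsync0 above).
            try (Transaction tx = ignite.transactions().txStart()) {
                cache.putAll(batch);

                tx.commit();
            }
        }
    }
}
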
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridInvokeValue in project ignite by apache.
The class IgniteTxHandler, method mvccEnlistBatch.
/**
 * Writes updated values on the backup node.
 *
 * @param tx Transaction.
 * @param ctx Cache context.
 * @param op Operation.
 * @param keys Keys.
 * @param vals Values sent from the primary node.
 * @param snapshot Mvcc snapshot.
 * @param futId Future ID.
 * @param batchNum Batch number.
 * @throws IgniteCheckedException If failed.
 */
public void mvccEnlistBatch(
    GridDhtTxRemote tx,
    GridCacheContext ctx,
    EnlistOperation op,
    List<KeyCacheObject> keys,
    List<Message> vals,
    MvccSnapshot snapshot,
    IgniteUuid futId,
    int batchNum
) throws IgniteCheckedException {
    assert keys != null && (vals == null || vals.size() == keys.size());
    assert tx != null;

    GridDhtCacheAdapter dht = ctx.dht();

    tx.addActiveCache(ctx, false);

    for (int i = 0; i < keys.size(); i++) {
        KeyCacheObject key = keys.get(i);

        assert key != null;

        int part = ctx.affinity().partition(key);

        try {
            GridDhtLocalPartition locPart = ctx.topology().localPartition(part, tx.topologyVersion(), false);

            if (locPart != null && locPart.reserve()) {
                try {
                    // Skip renting partitions.
                    if (locPart.state() == RENTING) {
                        tx.addInvalidPartition(ctx.cacheId(), part);

                        continue;
                    }

                    CacheObject val = null;
                    EntryProcessor entryProc = null;
                    Object[] invokeArgs = null;

                    boolean needOldVal = tx.txState().useMvccCaching(ctx.cacheId());

                    Message val0 = vals != null ? vals.get(i) : null;

                    CacheEntryInfoCollection entries =
                        val0 instanceof CacheEntryInfoCollection ? (CacheEntryInfoCollection)val0 : null;

                    if (entries == null && !op.isDeleteOrLock() && !op.isInvoke())
                        val = (val0 instanceof CacheObject) ? (CacheObject)val0 : null;

                    if (entries == null && op.isInvoke()) {
                        assert val0 instanceof GridInvokeValue;

                        GridInvokeValue invokeVal = (GridInvokeValue)val0;

                        entryProc = invokeVal.entryProcessor();
                        invokeArgs = invokeVal.invokeArgs();
                    }

                    assert entries != null || entryProc != null || !op.isInvoke() :
                        "entryProc=" + entryProc + ", op=" + op;

                    GridDhtCacheEntry entry = dht.entryExx(key, tx.topologyVersion());

                    GridCacheUpdateTxResult updRes;

                    while (true) {
                        ctx.shared().database().checkpointReadLock();

                        try {
                            if (entries == null) {
                                switch (op) {
                                    case DELETE:
                                        updRes = entry.mvccRemove(tx, ctx.localNodeId(), tx.topologyVersion(),
                                            snapshot, false, needOldVal, null, false);

                                        break;

                                    case INSERT:
                                    case TRANSFORM:
                                    case UPSERT:
                                    case UPDATE:
                                        updRes = entry.mvccSet(tx, ctx.localNodeId(), val, entryProc, invokeArgs,
                                            0, tx.topologyVersion(), snapshot, op.cacheOperation(), false, false,
                                            needOldVal, null, false, false);

                                        break;

                                    default:
                                        throw new IgniteSQLException("Cannot acquire lock for operation [op= " + op + "]" +
                                            "Operation is unsupported at the moment ",
                                            IgniteQueryErrorCode.UNSUPPORTED_OPERATION);
                                }
                            }
                            else {
                                updRes = entry.mvccUpdateRowsWithPreloadInfo(tx, ctx.localNodeId(), tx.topologyVersion(),
                                    entries.infos(), op.cacheOperation(), snapshot, futId, batchNum);
                            }

                            break;
                        }
                        catch (GridCacheEntryRemovedException ignore) {
                            entry = dht.entryExx(key);
                        }
                        finally {
                            ctx.shared().database().checkpointReadUnlock();
                        }
                    }

                    if (!updRes.filtered()) {
                        ctx.shared().mvccCaching().addEnlisted(key, updRes.newValue(), 0, 0, tx.xidVersion(),
                            updRes.oldValue(), tx.local(), tx.topologyVersion(), snapshot, ctx.cacheId(), tx,
                            futId, batchNum);
                    }

                    assert updRes.updateFuture() == null : "Entry should not be locked on the backup";
                }
                finally {
                    locPart.release();
                }
            }
            else
                tx.addInvalidPartition(ctx.cacheId(), part);
        }
        catch (GridDhtInvalidPartitionException e) {
            tx.addInvalidPartition(ctx.cacheId(), e.partition());
        }
    }
}
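
The inner loop above applies each backup update under the checkpoint read lock and, if the entry was concurrently removed (GridCacheEntryRemovedException), refetches it and retries, always releasing the lock in the finally block. Below is a simplified, self-contained sketch of that retry pattern; the Entry and Store types and their methods are hypothetical stand-ins, not Ignite APIs.

public class RetryUpdateSketch {
    static class EntryRemovedException extends Exception { }

    interface Entry {
        void update(Object val) throws EntryRemovedException;
    }

    interface Store {
        Entry entry(Object key);
        void lock();
        void unlock();
    }

    static void updateWithRetry(Store store, Object key, Object val) {
        Entry entry = store.entry(key);

        while (true) {
            store.lock(); // Analogous to checkpointReadLock() above.

            try {
                entry.update(val);

                break; // Success: leave the retry loop.
            }
            catch (EntryRemovedException ignore) {
                entry = store.entry(key); // Entry was concurrently removed: refetch and retry.
            }
            finally {
                store.unlock(); // Analogous to checkpointReadUnlock() above.
            }
        }
    }
}
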