Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method updateAllAsyncInternal0.
/**
* Executes local update after preloader fetched values.
*
* @param nodeId Node ID.
* @param req Update request.
* @param completionCb Completion callback.
*/
private void updateAllAsyncInternal0(UUID nodeId, GridNearAtomicAbstractUpdateRequest req, UpdateReplyClosure completionCb) {
ClusterNode node = ctx.discovery().node(nodeId);
if (node == null) {
U.warn(msgLog, "Skip near update request, node originated update request left [" + "futId=" + req.futureId() + ", node=" + nodeId + ']');
return;
}
GridNearAtomicUpdateResponse res = new GridNearAtomicUpdateResponse(ctx.cacheId(), nodeId, req.futureId(), req.partition(), false, ctx.deploymentEnabled());
assert !req.returnValue() || (req.operation() == TRANSFORM || req.size() == 1);
GridDhtAtomicAbstractUpdateFuture dhtFut = null;
boolean remap = false;
String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
IgniteCacheExpiryPolicy expiry = null;
try {
// If batch store update is enabled, we need to lock all entries.
// First, need to acquire locks on cache entries, then check filter.
List<GridDhtCacheEntry> locked = null;
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
try {
GridDhtPartitionTopology top = topology();
top.readLock();
try {
if (top.stopping()) {
res.addFailedKeys(req.keys(), new IgniteCheckedException("Failed to perform cache operation " + "(cache is stopped): " + name()));
completionCb.apply(req, res);
return;
}
// Do not check topology version if topology was locked on near node by
// external transaction or explicit lock.
if (req.topologyLocked() || !needRemap(req.topologyVersion(), top.topologyVersion())) {
ctx.shared().database().ensureFreeSpace(ctx.memoryPolicy());
locked = lockEntries(req, req.topologyVersion());
boolean hasNear = ctx.discovery().cacheNearNode(node, name());
// Assign next version for update inside entries lock.
GridCacheVersion ver = ctx.versions().next(top.topologyVersion());
if (hasNear)
res.nearVersion(ver);
if (msgLog.isDebugEnabled()) {
msgLog.debug("Assigned update version [futId=" + req.futureId() + ", writeVer=" + ver + ']');
}
assert ver != null : "Got null version for update request: " + req;
boolean sndPrevVal = !top.rebalanceFinished(req.topologyVersion());
dhtFut = createDhtFuture(ver, req);
expiry = expiryPolicy(req.expiry());
GridCacheReturn retVal = null;
if (req.size() > 1 &&                      // Several keys ...
    writeThrough() && !req.skipStore() &&  // and store is enabled and this is not local store ...
    !ctx.store().isLocal() &&              // (conflict resolver should be used for local store)
    !ctx.dr().receiveEnabled()) {          // and no DR.
// This method can only be used when there are no replicated entries in the batch.
UpdateBatchResult updRes = updateWithBatch(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
deleted = updRes.deleted();
dhtFut = updRes.dhtFuture();
if (req.operation() == TRANSFORM)
retVal = updRes.invokeResults();
} else {
UpdateSingleResult updRes = updateSingle(node, hasNear, req, res, locked, ver, dhtFut, ctx.isDrEnabled(), taskName, expiry, sndPrevVal);
retVal = updRes.returnValue();
deleted = updRes.deleted();
dhtFut = updRes.dhtFuture();
}
if (retVal == null)
retVal = new GridCacheReturn(ctx, node.isLocal(), true, null, true);
res.returnValue(retVal);
if (dhtFut != null) {
// To avoid deadlock disable back-pressure for sender data node.
if (req.writeSynchronizationMode() == PRIMARY_SYNC &&
    !ctx.discovery().cacheAffinityNode(ctx.discovery().node(nodeId), ctx.name()) &&
    !dhtFut.isDone()) {
final IgniteRunnable tracker = GridNioBackPressureControl.threadTracker();
if (tracker != null && tracker instanceof GridNioMessageTracker) {
((GridNioMessageTracker) tracker).onMessageReceived();
dhtFut.listen(new IgniteInClosure<IgniteInternalFuture<Void>>() {
@Override
public void apply(IgniteInternalFuture<Void> fut) {
((GridNioMessageTracker) tracker).onMessageProcessed();
}
});
}
}
ctx.mvcc().addAtomicFuture(dhtFut.id(), dhtFut);
}
} else {
// Should remap all keys.
remap = true;
res.remapTopologyVersion(top.topologyVersion());
}
} finally {
top.readUnlock();
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry should not become obsolete while holding lock.";
e.printStackTrace();
} finally {
if (locked != null)
unlockEntries(locked, req.topologyVersion());
// Enqueue if necessary after locks release.
if (deleted != null) {
assert !deleted.isEmpty();
assert ctx.deferredDelete() : this;
for (IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion> e : deleted) ctx.onDeferredDelete(e.get1(), e.get2());
}
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().fsync(null);
}
} catch (GridDhtInvalidPartitionException ignore) {
if (log.isDebugEnabled())
log.debug("Caught invalid partition exception for cache entry (will remap update request): " + req);
remap = true;
res.remapTopologyVersion(ctx.topology().topologyVersion());
} catch (Throwable e) {
// At least RuntimeException can be thrown by the code above when GridCacheContext is cleaned and there is
// an attempt to use cleaned resources.
U.error(log, "Unexpected exception during cache update", e);
res.addFailedKeys(req.keys(), e);
completionCb.apply(req, res);
if (e instanceof Error)
throw (Error) e;
return;
}
if (remap) {
assert dhtFut == null;
completionCb.apply(req, res);
} else if (dhtFut != null)
dhtFut.map(node, res.returnValue(), res, completionCb);
if (req.writeSynchronizationMode() != FULL_ASYNC)
req.cleanup(!node.isLocal());
sendTtlUpdateRequest(expiry);
}
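For contrast, here is a minimal, self-contained sketch of the locking discipline the method above relies on, written in plain Java with simplified, hypothetical types (Entry, updateAll): entries are locked up front, updates run under the locks, locks are released in a finally block, and deferred deletes are only handed back for processing once everything is unlocked. This is a structural sketch, not the Ignite implementation.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

public class LockedBatchUpdateSketch {
    static class Entry {
        final ReentrantLock lock = new ReentrantLock();
        volatile Object val;
    }

    /** Applies {@code newVal} to every entry; returns entries that became deletable. */
    static List<Entry> updateAll(List<Entry> entries, Object newVal) {
        List<Entry> deferredDeletes = new ArrayList<>();
        // First, acquire locks on all entries (mirrors lockEntries(req, topVer)).
        for (Entry e : entries)
            e.lock.lock();
        try {
            for (Entry e : entries) {
                e.val = newVal;
                if (newVal == null)
                    deferredDeletes.add(e); // Collected now, processed only after unlock.
            }
        }
        finally {
            // Release in reverse order (mirrors unlockEntries(locked, topVer)).
            for (int i = entries.size() - 1; i >= 0; i--)
                entries.get(i).lock.unlock();
        }
        return deferredDeletes; // Caller enqueues these only after locks are released.
    }
}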
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method updateSingle.
/**
* Updates locked entries one-by-one.
*
* @param nearNode Originating node.
* @param hasNear {@code True} if originating node has near cache.
* @param req Update request.
* @param res Update response.
* @param locked Locked entries.
* @param ver Assigned update version.
* @param dhtFut Optional DHT future.
* @param replicate Whether DR is enabled for that cache.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return Return value.
* @throws GridCacheEntryRemovedException Should never be thrown.
*/
private UpdateSingleResult updateSingle(ClusterNode nearNode, boolean hasNear, GridNearAtomicAbstractUpdateRequest req, GridNearAtomicUpdateResponse res, List<GridDhtCacheEntry> locked, GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, boolean replicate, String taskName, @Nullable IgniteCacheExpiryPolicy expiry, boolean sndPrevVal) throws GridCacheEntryRemovedException {
GridCacheReturn retVal = null;
Collection<IgniteBiTuple<GridDhtCacheEntry, GridCacheVersion>> deleted = null;
AffinityTopologyVersion topVer = req.topologyVersion();
boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < req.size(); i++) {
KeyCacheObject k = req.key(i);
GridCacheOperation op = req.operation();
// No GridCacheEntryRemovedException can be thrown.
try {
GridDhtCacheEntry entry = locked.get(i);
GridCacheVersion newConflictVer = req.conflictVersion(i);
long newConflictTtl = req.conflictTtl(i);
long newConflictExpireTime = req.conflictExpireTime(i);
assert !(newConflictVer instanceof GridCacheVersionEx) : newConflictVer;
Object writeVal = op == TRANSFORM ? req.entryProcessor(i) : req.writeValue(i);
Collection<UUID> readers = null;
Collection<UUID> filteredReaders = null;
if (checkReaders) {
readers = entry.readers();
filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
}
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nearNode.id(), locNodeId, op, writeVal, req.invokeArguments(),
    writeThrough() && !req.skipStore(), !req.skipStore(), sndPrevVal || req.returnValue(), req.keepBinary(), expiry,
    /*event*/true, /*metrics*/true, /*primary*/true, /*verCheck*/false,
    topVer, req.filter(), replicate ? DR_PRIMARY : DR_NONE, newConflictTtl, newConflictExpireTime, newConflictVer,
    /*conflictResolve*/true, intercept, req.subjectId(), taskName, /*prevVal*/null, /*updateCntr*/null, dhtFut);
if (dhtFut != null) {
if (updRes.sendToDht()) {
// Send to backups even in case of remove-remove scenarios.
GridCacheVersionConflictContext<?, ?> conflictCtx = updRes.conflictResolveResult();
if (conflictCtx == null)
newConflictVer = null;
else if (conflictCtx.isMerge())
// Conflict version is discarded in case of merge.
newConflictVer = null;
EntryProcessor<Object, Object, Object> entryProcessor = null;
dhtFut.addWriteEntry(affAssignment, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime(), newConflictVer, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (!F.isEmpty(filteredReaders))
dhtFut.addNearWriteEntries(filteredReaders, entry, updRes.newValue(), entryProcessor, updRes.newTtl(), updRes.conflictExpireTime());
} else {
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter or conflict resolution (will skip write) " + "[entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ']');
}
}
if (hasNear) {
if (updRes.sendToDht()) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
// If put the same value as in request then do not need to send it back.
if (op == TRANSFORM || writeVal != updRes.newValue()) {
res.addNearValue(i, updRes.newValue(), updRes.newTtl(), updRes.conflictExpireTime());
} else
res.addNearTtl(i, updRes.newTtl(), updRes.conflictExpireTime());
if (updRes.newValue() != null) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (F.contains(readers, nearNode.id())) // Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
else
res.addSkippedIndex(i);
} else
res.addSkippedIndex(i);
}
if (updRes.removeVersion() != null) {
if (deleted == null)
deleted = new ArrayList<>(req.size());
deleted.add(F.t(entry, updRes.removeVersion()));
}
if (op == TRANSFORM) {
assert !req.returnValue();
IgniteBiTuple<Object, Exception> compRes = updRes.computedResult();
if (compRes != null && (compRes.get1() != null || compRes.get2() != null)) {
if (retVal == null)
retVal = new GridCacheReturn(nearNode.isLocal());
retVal.addEntryProcessResult(ctx, k, null, compRes.get1(), compRes.get2(), req.keepBinary());
}
} else {
// Create only once.
if (retVal == null) {
CacheObject ret = updRes.oldValue();
retVal = new GridCacheReturn(ctx, nearNode.isLocal(), req.keepBinary(), req.returnValue() ? ret : null, updRes.success());
}
}
} catch (IgniteCheckedException e) {
res.addFailedKey(k, e);
}
}
return new UpdateSingleResult(retVal, deleted, dhtFut);
}
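A minimal sketch of the loop shape used by updateSingle() above, in plain Java with hypothetical types (Result, EntryUpdater): iterate by index to avoid iterator allocation, lazily create the aggregated return value on the first key that produces one, and record per-key failures instead of failing the whole request. This is a simplified illustration, not the Ignite API.
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class PerKeyUpdateSketch {
    static class Result {
        Object firstOldValue;                                  // Created lazily, only once.
        final Map<String, Exception> failedKeys = new LinkedHashMap<>();
    }

    interface EntryUpdater { Object update(String key) throws Exception; }

    static Result updateEach(List<String> keys, EntryUpdater updater) {
        Result res = new Result();
        for (int i = 0; i < keys.size(); i++) {                // Index loop: no iterator created.
            String k = keys.get(i);
            try {
                Object old = updater.update(k);
                if (res.firstOldValue == null)                 // Lazily capture the return value.
                    res.firstOldValue = old;
            }
            catch (Exception e) {
                res.failedKeys.put(k, e);                      // Mirrors res.addFailedKey(k, e).
            }
        }
        return res;
    }
}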
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method updateWithBatch.
/**
* Updates locked entries using batched write-through.
*
* @param node Sender node.
* @param hasNear {@code True} if originating node has near cache.
* @param req Update request.
* @param res Update response.
* @param locked Locked entries.
* @param ver Assigned version.
* @param dhtFut Optional DHT future.
* @param replicate Whether replication is enabled.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return Deleted entries.
* @throws GridCacheEntryRemovedException Should not be thrown.
*/
@SuppressWarnings("unchecked")
private UpdateBatchResult updateWithBatch(final ClusterNode node, final boolean hasNear, final GridNearAtomicAbstractUpdateRequest req, final GridNearAtomicUpdateResponse res, final List<GridDhtCacheEntry> locked, final GridCacheVersion ver, @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut, final boolean replicate, final String taskName, @Nullable final IgniteCacheExpiryPolicy expiry, final boolean sndPrevVal) throws GridCacheEntryRemovedException {
// Cannot update in batches during DR due to possible conflicts.
assert !ctx.dr().receiveEnabled();
// Should not request return values for putAll.
assert !req.returnValue() || req.operation() == TRANSFORM;
if (!F.isEmpty(req.filter()) && ctx.loadPreviousValue()) {
try {
reloadIfNeeded(locked);
} catch (IgniteCheckedException e) {
res.addFailedKeys(req.keys(), e);
return new UpdateBatchResult();
}
}
int size = req.size();
Map<KeyCacheObject, CacheObject> putMap = null;
Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap = null;
Collection<KeyCacheObject> rmvKeys = null;
List<CacheObject> writeVals = null;
UpdateBatchResult updRes = new UpdateBatchResult();
List<GridDhtCacheEntry> filtered = new ArrayList<>(size);
GridCacheOperation op = req.operation();
GridCacheReturn invokeRes = null;
int firstEntryIdx = 0;
boolean intercept = ctx.config().getInterceptor() != null;
for (int i = 0; i < locked.size(); i++) {
GridDhtCacheEntry entry = locked.get(i);
try {
if (!checkFilter(entry, req, res)) {
if (expiry != null && entry.hasValue()) {
long ttl = expiry.forAccess();
if (ttl != CU.TTL_NOT_CHANGED) {
entry.updateTtl(null, ttl);
expiry.ttlUpdated(entry.key(), entry.version(), entry.readers());
}
}
if (log.isDebugEnabled())
log.debug("Entry did not pass the filter (will skip write) [entry=" + entry + ", filter=" + Arrays.toString(req.filter()) + ", res=" + res + ']');
if (hasNear)
res.addSkippedIndex(i);
firstEntryIdx++;
continue;
}
if (op == TRANSFORM) {
EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
CacheObject old = entry.innerGet(ver, null, /*read through*/true, /*metrics*/true, /*event*/true,
    req.subjectId(), entryProcessor, taskName, null, req.keepBinary());
Object oldVal = null;
Object updatedVal = null;
CacheInvokeEntry<Object, Object> invokeEntry = new CacheInvokeEntry(entry.key(), old, entry.version(), req.keepBinary(), entry);
CacheObject updated;
try {
Object computed = entryProcessor.process(invokeEntry, req.invokeArguments());
if (computed != null) {
if (invokeRes == null)
invokeRes = new GridCacheReturn(node.isLocal());
computed = ctx.unwrapTemporary(computed);
invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), computed, null, req.keepBinary());
}
if (!invokeEntry.modified())
continue;
updatedVal = ctx.unwrapTemporary(invokeEntry.getValue());
updated = ctx.toCacheObject(updatedVal);
} catch (Exception e) {
if (invokeRes == null)
invokeRes = new GridCacheReturn(node.isLocal());
invokeRes.addEntryProcessResult(ctx, entry.key(), invokeEntry.key(), null, e, req.keepBinary());
updated = old;
}
if (updated == null) {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(e);
if (ctx.cancelRemove(interceptorRes))
continue;
}
// Update previous batch.
if (putMap != null) {
dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, null, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
putMap = null;
writeVals = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
// Start collecting new batch.
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
} else {
if (intercept) {
CacheLazyEntry e = new CacheLazyEntry(ctx, entry.key(), invokeEntry.key(), old, oldVal, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(e, updatedVal);
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
// Update previous batch.
if (rmvKeys != null) {
dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, null, null, rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
firstEntryIdx = i;
rmvKeys = null;
entryProcessorMap = null;
filtered = new ArrayList<>();
}
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
}
if (entryProcessorMap == null)
entryProcessorMap = new HashMap<>();
entryProcessorMap.put(entry.key(), entryProcessor);
} else if (op == UPDATE) {
CacheObject updated = req.value(i);
if (intercept) {
CacheObject old = entry.innerGet(null, null, /*read through*/ctx.loadPreviousValue(), /*metrics*/true, /*event*/true,
    req.subjectId(), null, taskName, null, req.keepBinary());
Object val = ctx.config().getInterceptor().onBeforePut(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()), ctx.unwrapBinaryIfNeeded(updated, req.keepBinary(), false));
if (val == null)
continue;
updated = ctx.toCacheObject(ctx.unwrapTemporary(val));
}
assert updated != null;
if (putMap == null) {
putMap = new LinkedHashMap<>(size, 1.0f);
writeVals = new ArrayList<>(size);
}
putMap.put(entry.key(), updated);
writeVals.add(updated);
} else {
assert op == DELETE;
if (intercept) {
CacheObject old = entry.innerGet(null, null, /*read through*/ctx.loadPreviousValue(), /*metrics*/true, /*event*/true,
    req.subjectId(), null, taskName, null, req.keepBinary());
IgniteBiTuple<Boolean, ?> interceptorRes = ctx.config().getInterceptor().onBeforeRemove(new CacheLazyEntry(ctx, entry.key(), old, req.keepBinary()));
if (ctx.cancelRemove(interceptorRes))
continue;
}
if (rmvKeys == null)
rmvKeys = new ArrayList<>(size);
rmvKeys.add(entry.key());
}
filtered.add(entry);
} catch (IgniteCheckedException e) {
res.addFailedKey(entry.key(), e);
}
}
// Store final batch.
if (putMap != null || rmvKeys != null) {
dhtFut = updatePartialBatch(hasNear, firstEntryIdx, filtered, ver, node, writeVals, putMap, rmvKeys, entryProcessorMap, dhtFut, req, res, replicate, updRes, taskName, expiry, sndPrevVal);
} else
assert filtered.isEmpty();
updRes.dhtFuture(dhtFut);
updRes.invokeResult(invokeRes);
return updRes;
}
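The batching strategy above can be illustrated with a short, self-contained sketch in plain Java (the Store interface and its putAll/removeAll names are hypothetical): consecutive puts and consecutive removes are accumulated separately, and whenever the operation kind switches, the pending partial batch is flushed so the store still observes the original ordering. A simplified sketch under those assumptions, not the Ignite code.
import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class BatchSplitSketch {
    interface Store {
        void putAll(Map<String, Object> m);
        void removeAll(List<String> keys);
    }

    /** Applies key/value operations in order; a null value means "remove". */
    static void apply(List<Map.Entry<String, Object>> ops, Store store) {
        Map<String, Object> putMap = null;
        List<String> rmvKeys = null;
        for (Map.Entry<String, Object> op : ops) {
            if (op.getValue() != null) {                 // Put: flush a pending remove batch first.
                if (rmvKeys != null) {
                    store.removeAll(rmvKeys);
                    rmvKeys = null;
                }
                if (putMap == null)
                    putMap = new LinkedHashMap<>();
                putMap.put(op.getKey(), op.getValue());
            }
            else {                                       // Remove: flush a pending put batch first.
                if (putMap != null) {
                    store.putAll(putMap);
                    putMap = null;
                }
                if (rmvKeys == null)
                    rmvKeys = new ArrayList<>();
                rmvKeys.add(op.getKey());
            }
        }
        if (putMap != null)                              // Store the final batch.
            store.putAll(putMap);
        if (rmvKeys != null)
            store.removeAll(rmvKeys);
    }
}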
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method processDhtAtomicUpdateRequest.
/**
* @param nodeId Sender node ID.
* @param req Dht atomic update request.
*/
private void processDhtAtomicUpdateRequest(UUID nodeId, GridDhtAtomicAbstractUpdateRequest req) {
assert Thread.currentThread().getName().startsWith("sys-stripe-") : Thread.currentThread().getName();
if (msgLog.isDebugEnabled()) {
msgLog.debug("Received DHT atomic update request [futId=" + req.futureId() + ", writeVer=" + req.writeVersion() + ", node=" + nodeId + ']');
}
assert req.partition() >= 0 : req;
GridCacheVersion ver = req.writeVersion();
GridDhtAtomicNearResponse nearRes = null;
if (req.nearNodeId() != null) {
nearRes = new GridDhtAtomicNearResponse(ctx.cacheId(), req.partition(), req.nearFutureId(), nodeId, req.flags());
}
boolean replicate = ctx.isDrEnabled();
boolean intercept = req.forceTransformBackups() && ctx.config().getInterceptor() != null;
String taskName = ctx.kernalContext().task().resolveTaskName(req.taskNameHash());
ctx.shared().database().checkpointReadLock();
try {
for (int i = 0; i < req.size(); i++) {
KeyCacheObject key = req.key(i);
try {
while (true) {
GridDhtCacheEntry entry = null;
try {
entry = entryExx(key);
CacheObject val = req.value(i);
CacheObject prevVal = req.previousValue(i);
EntryProcessor<Object, Object, Object> entryProcessor = req.entryProcessor(i);
Long updateIdx = req.updateCounter(i);
GridCacheOperation op = entryProcessor != null ? TRANSFORM : (val != null) ? UPDATE : DELETE;
long ttl = req.ttl(i);
long expireTime = req.conflictExpireTime(i);
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(ver, nodeId, nodeId, op,
    op == TRANSFORM ? entryProcessor : val, op == TRANSFORM ? req.invokeArguments() : null,
    /*write-through*/(ctx.store().isLocal() && !ctx.shared().localStorePrimaryOnly()) && writeThrough() && !req.skipStore(),
    /*read-through*/false, /*retval*/false, req.keepBinary(), /*expiry policy*/null,
    /*event*/true, /*metrics*/true, /*primary*/false, /*check version*/!req.forceTransformBackups(),
    req.topologyVersion(), CU.empty0(), replicate ? DR_BACKUP : DR_NONE, ttl, expireTime, req.conflictVersion(i),
    false, intercept, req.subjectId(), taskName, prevVal, updateIdx, null);
if (updRes.removeVersion() != null)
ctx.onDeferredDelete(entry, updRes.removeVersion());
entry.onUnlock();
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
if (log.isDebugEnabled())
log.debug("Got removed entry while updating backup value (will retry): " + key);
entry = null;
} finally {
if (entry != null)
ctx.evicts().touch(entry, req.topologyVersion());
}
}
} catch (NodeStoppingException e) {
U.error(log, "Failed to update key on backup (local node is stopping):" + key, e);
return;
} catch (GridDhtInvalidPartitionException ignored) {
// Ignore.
} catch (IgniteCheckedException e) {
IgniteCheckedException err = new IgniteCheckedException("Failed to update key on backup node: " + key, e);
if (nearRes != null)
nearRes.addFailedKey(key, err);
U.error(log, "Failed to update key on backup node: " + key, e);
}
}
} finally {
ctx.shared().database().checkpointReadUnlock();
}
GridDhtAtomicUpdateResponse dhtRes = null;
if (isNearEnabled(cacheCfg)) {
List<KeyCacheObject> nearEvicted = ((GridNearAtomicCache<K, V>) near()).processDhtAtomicUpdateRequest(nodeId, req, nearRes);
if (nearEvicted != null) {
dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
dhtRes.nearEvicted(nearEvicted);
}
}
try {
// TODO fire events only after successful fsync
if (ctx.shared().wal() != null)
ctx.shared().wal().fsync(null);
} catch (StorageException e) {
if (dhtRes != null)
dhtRes.onError(new IgniteCheckedException(e));
if (nearRes != null)
nearRes.onClassError(e);
} catch (IgniteCheckedException e) {
if (dhtRes != null)
dhtRes.onError(e);
if (nearRes != null)
nearRes.onClassError(e);
}
if (nearRes != null)
sendDhtNearResponse(req, nearRes);
if (dhtRes == null && req.replyWithoutDelay()) {
dhtRes = new GridDhtAtomicUpdateResponse(ctx.cacheId(), req.partition(), req.futureId(), ctx.deploymentEnabled());
}
if (dhtRes != null)
sendDhtPrimaryResponse(nodeId, req, dhtRes);
else
sendDeferredUpdateResponse(req.partition(), nodeId, req.futureId());
}
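A minimal sketch of the retry loop used above for backup updates, in plain Java with hypothetical types (Entry, EntryRemovedException): the entry is re-resolved on every attempt and the update is retried whenever the entry turns out to have been removed concurrently. A structural sketch under those assumptions, not the Ignite code path.
import java.util.concurrent.ConcurrentMap;

public class BackupUpdateRetrySketch {
    static class EntryRemovedException extends Exception {}

    static class Entry {
        volatile boolean obsolete;
        volatile Object val;

        void update(Object v) throws EntryRemovedException {
            if (obsolete)
                throw new EntryRemovedException(); // Entry was removed concurrently.
            val = v;
        }
    }

    static void updateBackup(ConcurrentMap<String, Entry> entries, String key, Object val) {
        while (true) {
            // Re-resolve the entry on every attempt (mirrors entryExx(key)).
            Entry entry = entries.computeIfAbsent(key, k -> new Entry());
            try {
                entry.update(val);
                break; // Success: leave the retry loop.
            }
            catch (EntryRemovedException ignored) {
                // Entry became obsolete between lookup and update: drop it and retry.
                entries.remove(key, entry);
            }
        }
    }
}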
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method reloadIfNeeded.
/**
* @param entries Entries.
* @throws IgniteCheckedException If failed.
*/
private void reloadIfNeeded(final List<GridDhtCacheEntry> entries) throws IgniteCheckedException {
Map<KeyCacheObject, Integer> needReload = null;
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
if (entry == null)
continue;
CacheObject val = entry.rawGet();
if (val == null) {
if (needReload == null)
needReload = new HashMap<>(entries.size(), 1.0f);
needReload.put(entry.key(), i);
}
}
if (needReload != null) {
final Map<KeyCacheObject, Integer> idxMap = needReload;
ctx.store().loadAll(null, needReload.keySet(), new CI2<KeyCacheObject, Object>() {
@Override
public void apply(KeyCacheObject k, Object v) {
Integer idx = idxMap.get(k);
if (idx != null) {
GridDhtCacheEntry entry = entries.get(idx);
try {
GridCacheVersion ver = entry.version();
entry.versionedValue(ctx.toCacheObject(v), null, ver, null, null);
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry should not get obsolete while holding lock [entry=" + entry + ", e=" + e + ']';
} catch (IgniteCheckedException e) {
throw new IgniteException(e);
}
}
}
});
}
}
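The reload pattern above can be summarized with a small, self-contained sketch in plain Java (the Store interface and its loadAll callback are hypothetical): keys whose in-memory value is missing are remembered together with their index into the locked-entry list, and one bulk load routes each loaded value back to its slot via the index map. A simplified sketch, not the Ignite store API.
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.BiConsumer;

public class ReloadMissingSketch {
    interface Store { void loadAll(Iterable<String> keys, BiConsumer<String, Object> cb); }

    static void reloadIfNeeded(List<String> keys, Object[] vals, Store store) {
        Map<String, Integer> needReload = null;
        for (int i = 0; i < keys.size(); i++) {
            if (vals[i] == null) {                       // Value not in memory: remember its index.
                if (needReload == null)
                    needReload = new HashMap<>(keys.size(), 1.0f);
                needReload.put(keys.get(i), i);
            }
        }
        if (needReload != null) {
            final Map<String, Integer> idxMap = needReload;
            // One bulk load; the callback routes each loaded value back to its slot.
            store.loadAll(idxMap.keySet(), (k, v) -> {
                Integer idx = idxMap.get(k);
                if (idx != null)
                    vals[idx] = v;
            });
        }
    }
}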