Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method updatePartialBatch.
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if there are backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return DHT update future if there are backups, {@code null} otherwise.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final DhtAtomicUpdateResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
continue;
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(
    ver,
    nearNode.id(),
    locNodeId,
    op,
    writeVal,
    null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(),
    expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer,
    null,
    replicate ? DR_PRIMARY : DR_NONE,
    CU.TTL_NOT_CHANGED,
    CU.EXPIRE_TIME_CALCULATE,
    null,
    /*conflict resolve*/false,
    /*intercept*/false,
    req.subjectId(),
    taskName,
    null,
    null,
    dhtFut);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
batchRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
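// Near-cache bookkeeping (below): a key the near node does not own gets its value/TTL
// sent back in the response, while a key it now owns as primary or backup is removed
// from the entry's reader list.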
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys())
    failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
return dhtFut;
}
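In the put branch above, F.viewReadOnly(...) lazily pairs every value in putMap with the single update version before the batch reaches the store. Below is a minimal JDK-only sketch of the same idea; the names VersionedView and withVersion are hypothetical, and unlike F.viewReadOnly this builds an eager copy rather than a lazy view:

import java.util.AbstractMap;
import java.util.Map;
import java.util.stream.Collectors;

final class VersionedView {
    /** Pairs each value in 'src' with the single version 'ver' (eager copy, not a lazy view). */
    static <K, V, T> Map<K, Map.Entry<V, T>> withVersion(Map<K, V> src, T ver) {
        return src.entrySet().stream()
            .collect(Collectors.toUnmodifiableMap(
                Map.Entry::getKey,
                e -> new AbstractMap.SimpleImmutableEntry<>(e.getValue(), ver)));
    }
}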
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtLocalPartition, method clearAll.
/**
* Removes all entries and rows from this partition.
*
* @return Number of rows cleared from page memory.
* @throws NodeStoppingException If node stopping.
*/
protected long clearAll(EvictionContext evictionCtx) throws NodeStoppingException {
long order = clearVer;
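// Note: the local GridCacheVersion 'clearVer' declared on the next line shadows the long 'clearVer' field read above.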
GridCacheVersion clearVer = ctx.versions().startVersion();
GridCacheObsoleteEntryExtras extras = new GridCacheObsoleteEntryExtras(clearVer);
boolean rec = grp.eventRecordable(EVT_CACHE_REBALANCE_OBJECT_UNLOADED);
long cleared = 0;
int stopCntr = 0;
CacheMapHolder hld = grp.sharedGroup() ? null : singleCacheEntryMap;
boolean recoveryMode = ctx.kernalContext().recoveryMode();
try {
// The clearing start record is logged for atomic caches only, because atomic and transactional caches use different partition counter implementations.
if (state() == MOVING && !recoveryMode && grp.walEnabled() && grp.config().getAtomicityMode() == ATOMIC)
ctx.wal().log(new PartitionClearingStartRecord(id, grp.groupId(), order));
GridIterator<CacheDataRow> it0 = grp.offheap().partitionIterator(id);
while (it0.hasNext()) {
if ((stopCntr = (stopCntr + 1) & 1023) == 0 && evictionCtx.shouldStop())
return cleared;
ctx.database().checkpointReadLock();
try {
CacheDataRow row = it0.next();
// Do not clear fresh rows in case of partition reloading.
// This is required because normal updates are possible to moving partition which is currently cleared.
// We can clean OWNING partition if a partition has been reset from lost state.
// In this case new updates must be preserved.
// Partition state can be switched from RENTING to MOVING and vice versa during clearing.
long order0 = row.version().order();
if ((state() == MOVING || recoveryMode) && (order0 == 0 /* inserted by isolated updater */ || order0 > order))
    continue;
if (grp.sharedGroup() && (hld == null || hld.cctx.cacheId() != row.cacheId()))
hld = cacheMapHolder(ctx.cacheContext(row.cacheId()));
assert hld != null;
GridCacheMapEntry cached = putEntryIfObsoleteOrAbsent(hld, hld.cctx, grp.affinity().lastVersion(), row.key(), true, true);
assert cached != null : "Expecting the reservation " + this;
if (cached.deleted())
continue;
if (cached instanceof GridDhtCacheEntry && ((GridDhtCacheEntry) cached).clearInternal(clearVer, extras)) {
removeEntry(cached);
if (rec && !hld.cctx.config().isEventsDisabled()) {
hld.cctx.events().addEvent(cached.partition(), cached.key(), ctx.localNodeId(), null, null, null, EVT_CACHE_REBALANCE_OBJECT_UNLOADED, null, false, cached.rawGet(), cached.hasValue(), null, null, false);
}
cleared++;
}
} catch (GridDhtInvalidPartitionException e) {
assert isEmpty() && state() == EVICTED : "Invalid error [e=" + e + ", part=" + this + ']';
// Partition is already concurrently cleared and evicted.
break;
} finally {
ctx.database().checkpointReadUnlock();
}
}
if (forceTestCheckpointOnEviction) {
if (partWhereTestCheckpointEnforced == null && cleared >= fullSize()) {
ctx.database().forceCheckpoint("test").futureFor(FINISHED).get();
log.warning("Forced checkpoint by test reasons for partition: " + this);
partWhereTestCheckpointEnforced = id;
}
}
// Attempt to destroy.
if (!recoveryMode)
((GridDhtPreloader) grp.preloader()).tryFinishEviction(this);
} catch (NodeStoppingException e) {
if (log.isDebugEnabled())
log.debug("Failed to get iterator for evicted partition: " + id);
throw e;
} catch (IgniteCheckedException e) {
U.error(log, "Failed to get iterator for evicted partition: " + id, e);
}
return cleared;
}
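The (stopCntr = (stopCntr + 1) & 1023) == 0 guard above is a throttling idiom: the eviction context's shouldStop() check runs only once every 1024 rows, keeping the hot loop cheap. Here is a standalone sketch of the pattern under hypothetical names (ThrottledDrain, drain):

import java.util.Iterator;
import java.util.function.BooleanSupplier;

final class ThrottledDrain {
    /** Consumes 'it', polling 'shouldStop' only every 1024 elements; returns the count processed. */
    static long drain(Iterator<?> it, BooleanSupplier shouldStop) {
        long processed = 0;
        int stopCntr = 0;
        while (it.hasNext()) {
            // The mask keeps stopCntr in [0, 1023]; the stop check fires only on wrap-around to 0.
            if ((stopCntr = (stopCntr + 1) & 1023) == 0 && shouldStop.getAsBoolean())
                break;
            it.next();
            processed++;
        }
        return processed;
    }
}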
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridCacheContext, method map.
/**
* @param entry Entry.
* @param nodes Nodes.
* @param map Map.
*/
private void map(GridDhtCacheEntry entry, Iterable<ClusterNode> nodes, Map<ClusterNode, List<GridDhtCacheEntry>> map) {
if (nodes != null) {
for (ClusterNode n : nodes) {
List<GridDhtCacheEntry> entries = map.get(n);
if (entries == null)
map.put(n, entries = new LinkedList<>());
entries.add(entry);
}
}
}
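The helper above is a classic multimap insert written before Map.computeIfAbsent existed. A JDK-only sketch of the same grouping with that method follows; GroupBy is a hypothetical name, and ArrayList is substituted for the original's LinkedList, since append-then-iterate workloads rarely benefit from a linked list:

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

final class GroupBy {
    /** Adds 'entry' to the list kept for each node in 'nodes', creating lists on demand. */
    static <N, E> void map(E entry, Iterable<N> nodes, Map<N, List<E>> map) {
        if (nodes == null)
            return;
        for (N n : nodes)
            map.computeIfAbsent(n, k -> new ArrayList<>()).add(entry);
    }
}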
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method updatePartialBatch (a second variant that takes the DHT future from DhtAtomicUpdateResult rather than as a parameter).
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param dhtUpdRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
*/
private void updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final DhtAtomicUpdateResult dhtUpdRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
final GridDhtAtomicAbstractUpdateFuture dhtFut = dhtUpdRes.dhtFuture();
Collection<Object> failedToUnwrapKeys = null;
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null) {
Object key = entry.key();
try {
key = entry.key().value(ctx.cacheObjectContext(), false);
} catch (BinaryInvalidTypeException e) {
if (log.isDebugEnabled()) {
if (failedToUnwrapKeys == null)
failedToUnwrapKeys = new ArrayList<>();
// To limit keys count in log message.
if (failedToUnwrapKeys.size() < 5)
failedToUnwrapKeys.add(key);
}
}
if (storeErr.failedKeys().contains(key))
continue;
}
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(
    ver,
    nearNode.id(),
    locNodeId,
    op,
    writeVal,
    null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(),
    expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer,
    null,
    replicate ? DR_PRIMARY : DR_NONE,
    CU.TTL_NOT_CHANGED,
    CU.EXPIRE_TIME_CALCULATE,
    null,
    /*conflict resolve*/false,
    /*intercept*/false,
    taskName,
    null,
    null,
    dhtFut,
    entryProcessor != null);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
dhtUpdRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter(), op);
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
dhtUpdRes.processedEntriesCount(firstEntryIdx + i + 1);
}
if (failedToUnwrapKeys != null) {
log.warning("Failed to get values of keys: " + failedToUnwrapKeys + " (the binary objects will be used instead).");
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys())
    failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
}
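The failedToUnwrapKeys handling above deliberately caps the warning at five keys so a large failed batch cannot flood the log. A generic sketch of that bounded-sampling idiom, under the hypothetical names LogSampling and sampleOf:

import java.util.ArrayList;
import java.util.List;

final class LogSampling {
    /** Returns at most 'limit' items from 'src', e.g. for inclusion in a log message. */
    static <T> List<T> sampleOf(Iterable<T> src, int limit) {
        List<T> out = new ArrayList<>(limit);
        for (T item : src) {
            if (out.size() >= limit)
                break;
            out.add(item);
        }
        return out;
    }
}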
Use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
The class GridDhtAtomicCache, method unlockEntries.
/**
* Releases java-level locks on cache entries.
*
* @param locked Locked entries.
* @param topVer Topology version.
*/
private void unlockEntries(List<GridDhtCacheEntry> locked, AffinityTopologyVersion topVer) {
// Process deleted entries before locks release.
assert ctx.deferredDelete() : this;
// Entries to skip eviction manager notification for.
// Enqueue entries while holding locks.
Collection<KeyCacheObject> skip = null;
int size = locked.size();
try {
for (int i = 0; i < size; i++) {
GridCacheMapEntry entry = locked.get(i);
if (entry != null && entry.deleted()) {
if (skip == null)
skip = U.newHashSet(locked.size());
skip.add(entry.key());
}
}
} finally {
// The loop above can throw (e.g. a RuntimeException if the cache context is cleaned up concurrently and its resources are gone); that's why the locks are released in the finally block.
for (int i = 0; i < size; i++) {
GridCacheMapEntry entry = locked.get(i);
if (entry != null)
entry.unlockEntry();
}
}
// Try evict partitions.
for (int i = 0; i < size; i++) {
GridDhtCacheEntry entry = locked.get(i);
if (entry != null)
entry.onUnlock();
}
if (skip != null && skip.size() == size)
// Optimization.
return;
// Eviction manager will remove empty entries.
for (int i = 0; i < size; i++) {
GridCacheMapEntry entry = locked.get(i);
if (entry != null && (skip == null || !skip.contains(entry.key())))
entry.touch();
}
}
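unlockEntries() is the release half of a lock-all/work/unlock-all discipline: every entry lock is released inside a finally block so a failure while processing one entry cannot leak the locks held on the rest. A minimal sketch of the pattern with plain ReentrantLocks; BatchLocks and withLocked are hypothetical names:

import java.util.List;
import java.util.concurrent.locks.ReentrantLock;

final class BatchLocks {
    /** Locks every entry in list order, runs 'work', then releases all locks even if 'work' throws. */
    static void withLocked(List<ReentrantLock> locks, Runnable work) {
        for (ReentrantLock l : locks)
            l.lock();
        try {
            work.run();
        }
        finally {
            for (ReentrantLock l : locks)
                l.unlock();
        }
    }
}

Acquiring the locks in a single, consistent order is what keeps concurrent batches from deadlocking against each other.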