use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
the class GridDhtAtomicCache method updatePartialBatch.
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if there are backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true}, sends the previous value to backups.
* @return Deleted entries.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final UpdateBatchResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
    assert putMap == null ^ rmvKeys == null;
    assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";

    AffinityTopologyVersion topVer = req.topologyVersion();

    boolean checkReaders = hasNear || ctx.discovery().hasNearCache(ctx.cacheId(), topVer);

    CacheStorePartialUpdateException storeErr = null;

    try {
        GridCacheOperation op;

        if (putMap != null) {
            try {
                Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view =
                    F.viewReadOnly(putMap, new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
                        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
                            return F.t(val, ver);
                        }
                    });

                ctx.store().putAll(null, view);
            }
            catch (CacheStorePartialUpdateException e) {
                storeErr = e;
            }

            op = UPDATE;
        }
        else {
            try {
                ctx.store().removeAll(null, rmvKeys);
            }
            catch (CacheStorePartialUpdateException e) {
                storeErr = e;
            }

            op = DELETE;
        }

        boolean intercept = ctx.config().getInterceptor() != null;

        AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);

        // Avoid iterator creation.
        for (int i = 0; i < entries.size(); i++) {
            GridDhtCacheEntry entry = entries.get(i);

            assert Thread.holdsLock(entry);

            if (entry.obsolete()) {
                assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;

                continue;
            }

            if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
                continue;

            try {
                // We are holding java-level locks on entries at this point.
                CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;

                assert writeVal != null || op == DELETE : "null write value found.";

                Collection<UUID> readers = null;
                Collection<UUID> filteredReaders = null;

                if (checkReaders) {
                    readers = entry.readers();
                    filteredReaders = F.view(entry.readers(), F.notEqualTo(nearNode.id()));
                }

                GridCacheUpdateAtomicResult updRes = entry.innerUpdate(
                    ver,
                    nearNode.id(),
                    locNodeId,
                    op,
                    writeVal,
                    null,
                    /*write-through*/false,
                    /*read-through*/false,
                    /*retval*/sndPrevVal,
                    req.keepBinary(),
                    expiry,
                    /*event*/true,
                    /*metrics*/true,
                    /*primary*/true,
                    /*verCheck*/false,
                    topVer,
                    null,
                    replicate ? DR_PRIMARY : DR_NONE,
                    CU.TTL_NOT_CHANGED,
                    CU.EXPIRE_TIME_CALCULATE,
                    null,
                    /*conflict resolve*/false,
                    /*intercept*/false,
                    req.subjectId(),
                    taskName,
                    null,
                    null,
                    dhtFut);

                assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null :
                    "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;

                if (intercept) {
                    if (op == UPDATE) {
                        ctx.config().getInterceptor().onAfterPut(
                            new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
                    }
                    else {
                        assert op == DELETE : op;

                        // Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
                        ctx.config().getInterceptor().onAfterRemove(
                            new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
                    }
                }

                batchRes.addDeleted(entry, updRes, entries);

                if (dhtFut != null) {
                    EntryProcessor<Object, Object, Object> entryProcessor =
                        entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());

                    dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(),
                        CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());

                    if (!F.isEmpty(filteredReaders))
                        dhtFut.addNearWriteEntries(filteredReaders, entry, writeVal, entryProcessor,
                            updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                }

                if (hasNear) {
                    if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
                        int idx = firstEntryIdx + i;

                        if (req.operation() == TRANSFORM)
                            res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
                        else
                            res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);

                        if (writeVal != null || entry.hasValue()) {
                            IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);

                            assert f == null : f;
                        }
                    }
                    // Reader became primary or backup.
                    else if (readers.contains(nearNode.id()))
                        entry.removeReader(nearNode.id(), req.messageId());
                    else
                        res.addSkippedIndex(firstEntryIdx + i);
                }
            }
            catch (GridCacheEntryRemovedException e) {
                assert false : "Entry cannot become obsolete while holding lock.";

                e.printStackTrace();
            }
        }
    }
    catch (IgniteCheckedException e) {
        res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
    }

    if (storeErr != null) {
        ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());

        for (Object failedKey : storeErr.failedKeys())
            failed.add(ctx.toCacheKeyObject(failedKey));

        res.addFailedKeys(failed, storeErr.getCause());
    }

    return dhtFut;
}
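A detail worth pausing on is the write-through branch at the top of the method: instead of materializing a second map that pairs every value with ver, the code hands ctx.store().putAll(null, view) a lazy, read-only view produced by F.viewReadOnly, so each (value, version) tuple is created only as the store iterates the batch. Below is a minimal JDK-only sketch of that view pattern; the class LazyMapView and its method are illustrative stand-ins, not Ignite API.

import java.util.AbstractMap;
import java.util.AbstractSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;

/** A JDK-only stand-in for the lazy, read-only view F.viewReadOnly(...) builds above. */
public final class LazyMapView {
    /** Returns a view of {@code src} whose values are transformed on access; nothing is copied up front. */
    public static <K, V, R> Map<K, R> viewReadOnly(Map<K, V> src, Function<? super V, ? extends R> trans) {
        return new AbstractMap<K, R>() {
            @Override public Set<Map.Entry<K, R>> entrySet() {
                return new AbstractSet<Map.Entry<K, R>>() {
                    @Override public int size() {
                        return src.size();
                    }

                    @Override public Iterator<Map.Entry<K, R>> iterator() {
                        Iterator<Map.Entry<K, V>> it = src.entrySet().iterator();

                        return new Iterator<Map.Entry<K, R>>() {
                            @Override public boolean hasNext() {
                                return it.hasNext();
                            }

                            @Override public Map.Entry<K, R> next() {
                                Map.Entry<K, V> e = it.next();

                                // Pair the value with its transform only when the consumer reaches it.
                                return new AbstractMap.SimpleImmutableEntry<>(e.getKey(), trans.apply(e.getValue()));
                            }
                        };
                    }
                };
            }
        };
    }
}

The trade-off is the usual one for lazy views: the transform runs on every traversal, which suits a single-pass consumer such as a store's batch write.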
use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
the class IgnitePutAllLargeBatchSelfTest method checkPutAll.
/**
* @throws Exception If failed.
*/
private void checkPutAll(TransactionConcurrency concurrency, boolean nearEnabled) throws Exception {
    this.nearEnabled = nearEnabled;

    startGrids(GRID_CNT);

    awaitPartitionMapExchange();

    try {
        IgniteCache<Object, Object> cache = grid(0).cache(DEFAULT_CACHE_NAME);

        int keyCnt = 200;

        for (int i = 0; i < keyCnt; i++)
            cache.put(i, i);

        // Create readers if near cache is enabled.
        for (int g = 1; g < 2; g++) {
            for (int i = 30; i < 70; i++)
                ((IgniteKernal)grid(g)).getCache(DEFAULT_CACHE_NAME).get(i);
        }

        info(">>> Starting test tx.");

        try (Transaction tx = grid(0).transactions().txStart(concurrency, TransactionIsolation.REPEATABLE_READ)) {
            Map<Integer, Integer> map = new LinkedHashMap<>();

            for (int i = 0; i < keyCnt; i++)
                map.put(i, i * i);

            cache.getAll(map.keySet());

            cache.putAll(map);

            tx.commit();
        }

        // Check that no stale transactions left and all locks are released.
        for (int g = 0; g < GRID_CNT; g++) {
            IgniteKernal k = (IgniteKernal)grid(g);

            GridCacheAdapter<Object, Object> cacheAdapter = k.context().cache().internalCache(DEFAULT_CACHE_NAME);

            assertEquals(0, cacheAdapter.context().tm().idMapSize());

            for (int i = 0; i < keyCnt; i++) {
                if (cacheAdapter.isNear()) {
                    GridDhtCacheEntry entry =
                        (GridDhtCacheEntry)((GridNearCacheAdapter<Object, Object>)cacheAdapter).dht().peekEx(i);

                    if (entry != null) {
                        assertFalse(entry.lockedByAny());
                        assertTrue(entry.localCandidates().isEmpty());
                        assertTrue(entry.remoteMvccSnapshot().isEmpty());
                    }
                }

                GridCacheEntryEx entry = cacheAdapter.peekEx(i);

                if (entry != null) {
                    assertFalse(entry.lockedByAny());
                    assertTrue(entry.localCandidates().isEmpty());
                    assertTrue(entry.remoteMvccSnapshot().isEmpty());
                }
            }
        }

        for (int g = 0; g < GRID_CNT; g++) {
            IgniteCache<Object, Object> checkCache = grid(g).cache(DEFAULT_CACHE_NAME);

            ClusterNode checkNode = grid(g).localNode();

            for (int i = 0; i < keyCnt; i++) {
                if (grid(g).affinity(DEFAULT_CACHE_NAME).isPrimaryOrBackup(checkNode, i))
                    assertEquals(i * i, checkCache.localPeek(i, CachePeekMode.PRIMARY, CachePeekMode.BACKUP));
            }
        }
    }
    finally {
        stopAllGrids();
    }
}
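For orientation, the core scenario this test drives, a getAll() of every key followed by a large putAll() inside one transaction, looks like this on the public API. This is a minimal sketch: the cache name, batch size, and the PESSIMISTIC concurrency are illustrative choices, whereas the test itself parameterizes concurrency.

import java.util.LinkedHashMap;
import java.util.Map;

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.CacheAtomicityMode;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.transactions.Transaction;
import org.apache.ignite.transactions.TransactionConcurrency;
import org.apache.ignite.transactions.TransactionIsolation;

public class TxPutAllSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>("txCache");

            // putAll() only enlists in a transaction on a TRANSACTIONAL cache.
            ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);

            IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(ccfg);

            Map<Integer, Integer> batch = new LinkedHashMap<>();

            for (int i = 0; i < 200; i++)
                batch.put(i, i * i);

            try (Transaction tx = ignite.transactions().txStart(
                TransactionConcurrency.PESSIMISTIC, TransactionIsolation.REPEATABLE_READ)) {
                // Read-before-write, mirroring the getAll() + putAll() pair in the test.
                cache.getAll(batch.keySet());

                cache.putAll(batch);

                tx.commit();
            }
        }
    }
}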
use of org.apache.ignite.internal.processors.cache.distributed.dht.GridDhtCacheEntry in project ignite by apache.
the class GridCacheNearMultiNodeSelfTest method testOptimisticWriteThrough.
/**
* Test Optimistic repeatable read write-through.
*
* @throws Exception If failed.
*/
@SuppressWarnings({ "ConstantConditions" })
public void testOptimisticWriteThrough() throws Exception {
    IgniteCache<Integer, String> near = jcache(0);

    if (transactional()) {
        try (Transaction tx = grid(0).transactions().txStart(OPTIMISTIC, REPEATABLE_READ, 0, 0)) {
            near.put(2, "2");

            String s = near.getAndPut(3, "3");

            assertNotNull(s);
            assertEquals("3", s);

            assertEquals("2", near.get(2));
            assertEquals("3", near.get(3));

            GridDhtCacheEntry entry = (GridDhtCacheEntry)dht(primaryGrid(2)).peekEx(2);

            if (entry != null)
                assertNull("Unexpected entry: " + entry, entry.rawGet());

            assertNotNull(localPeek(dht(primaryGrid(3)), 3));

            tx.commit();
        }
    }
    else {
        near.put(2, "2");

        String s = near.getAndPut(3, "3");

        assertNotNull(s);
        assertEquals("3", s);
    }

    assertEquals("2", near.localPeek(2));
    assertEquals("3", near.localPeek(3));

    assertEquals("2", localPeek(dht(primaryGrid(2)), 2));
    assertEquals("3", localPeek(dht(primaryGrid(3)), 3));

    assertEquals(2, near.localSize(CachePeekMode.ALL));
}
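The split the assertions rely on, near.localPeek(...) versus localPeek(dht(...), ...), only exists when the cache has a near cache on the node. On the public API a near cache is declared per cache configuration; here is a minimal sketch, with the cache name and eviction limit as illustrative values.

import org.apache.ignite.Ignite;
import org.apache.ignite.IgniteCache;
import org.apache.ignite.Ignition;
import org.apache.ignite.cache.eviction.lru.LruEvictionPolicy;
import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.NearCacheConfiguration;

public class NearCacheSketch {
    public static void main(String[] args) {
        try (Ignite ignite = Ignition.start()) {
            NearCacheConfiguration<Integer, String> nearCfg = new NearCacheConfiguration<>();

            // Bound the near cache so reader-side copies cannot grow without limit.
            nearCfg.setNearEvictionPolicyFactory(() -> new LruEvictionPolicy<Integer, String>(10_000));

            CacheConfiguration<Integer, String> ccfg = new CacheConfiguration<>("nearCache");
            ccfg.setNearConfiguration(nearCfg);

            IgniteCache<Integer, String> cache = ignite.getOrCreateCache(ccfg);

            cache.put(2, "2");

            // Repeated local reads can now be served from the near copy instead of the DHT.
            System.out.println(cache.get(2));
        }
    }
}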