use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
the class WalRecoveryTxLogicalRecordsTest method testWalIteratorOverPartitionWithMissingEntries.
/**
* Tests that the history iterator works correctly when a partition contains updates missed due to rollback.
*/
@Test
public void testWalIteratorOverPartitionWithMissingEntries() throws Exception {
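// A zero threshold makes WAL-based (historical) rebalancing eligible for every partition, so the history iterator is exercised.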
System.setProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "0");
try {
Ignite ignite = startGrid();
ignite.cluster().active(true);
awaitPartitionMapExchange();
int totalKeys = 30;
final int part = 1;
List<Integer> keys = partitionKeys(ignite.cache(CACHE_NAME), part, totalKeys, 0);
ignite.cache(CACHE_NAME).put(keys.get(0), keys.get(0));
ignite.cache(CACHE_NAME).put(keys.get(1), keys.get(1));
int rolledBack = 0;
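// prepareTx() is assumed to prepare a transaction over the given keys and then roll it back, reserving update counters without writing entries, and to return the number of keys involved.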
rolledBack += prepareTx(ignite, keys.subList(2, 6));
for (Integer key : keys.subList(6, 10)) ignite.cache(CACHE_NAME).put(key, key);
rolledBack += prepareTx(ignite, keys.subList(10, 14));
for (Integer key : keys.subList(14, 20)) ignite.cache(CACHE_NAME).put(key, key);
rolledBack += prepareTx(ignite, keys.subList(20, 25));
for (Integer key : keys.subList(25, 30)) ignite.cache(CACHE_NAME).put(key, key);
assertEquals(totalKeys - rolledBack, ignite.cache(CACHE_NAME).size());
// Expecting counters: 1-2, missed 3-6, 7-10, missed 11-14, 15-20, missed 21-25, 26-30
List<CacheDataRow> rows = rows(ignite, part, 0, 4);
assertEquals(2, rows.size());
assertEquals(keys.get(0), rows.get(0).key().value(null, false));
assertEquals(keys.get(1), rows.get(1).key().value(null, false));
rows = rows(ignite, part, 3, 4);
assertEquals(0, rows.size());
rows = rows(ignite, part, 4, 23);
assertEquals(10, rows.size());
int i = 0;
for (Integer key : keys.subList(6, 10)) assertEquals(key, rows.get(i++).key().value(null, false));
for (Integer key : keys.subList(14, 20)) assertEquals(key, rows.get(i++).key().value(null, false));
i = 0;
rows = rows(ignite, part, 16, 26);
assertEquals(5, rows.size());
for (Integer key : keys.subList(16, 20)) assertEquals(key, rows.get(i++).key().value(null, false));
assertEquals(keys.get(25), rows.get(i).key().value(null, false));
} finally {
stopAllGrids();
System.clearProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD);
}
}
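The rows(ignite, part, from, to) helper is not included in this excerpt. A minimal sketch of how it could be implemented, assuming it simply drains the historical rebalance iterator that the second test below uses directly (the helper name and counter arguments come from its call sites above; everything else is an assumption):

private List<CacheDataRow> rows(Ignite ignite, int part, long fromCntr, long toCntr) throws IgniteCheckedException {
    CacheGroupContext grp = ((IgniteEx)ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));

    IgniteDhtDemandedPartitionsMap map = new IgniteDhtDemandedPartitionsMap();

    // Demand only the given partition, historically, for the requested update-counter range.
    map.addHistorical(part, fromCntr, toCntr, PARTS);

    List<CacheDataRow> res = new ArrayList<>();

    try (IgniteRebalanceIterator it = grp.offheap().rebalanceIterator(map, grp.affinity().lastVersion())) {
        while (it.hasNextX())
            res.add(it.next());
    }

    return res;
}

The second test additionally reserves a WAL pointer around the iterator; this sketch omits that step.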
use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
the class WalRecoveryTxLogicalRecordsTest method testHistoricalRebalanceIterator.
/**
* @throws Exception if failed.
*/
@Test
public void testHistoricalRebalanceIterator() throws Exception {
System.setProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "0");
extraCcfg = new CacheConfiguration(CACHE_NAME + "2");
extraCcfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));
Ignite ignite = startGrid();
try {
ignite.cluster().active(true);
GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) ((IgniteEx) ignite).context().cache().context().database();
dbMgr.waitForCheckpoint("test");
// This number depends on the WAL history size.
int entries = 25;
IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
IgniteCache<Integer, Integer> cache2 = ignite.cache(CACHE_NAME + "2");
for (int i = 0; i < entries; i++) {
// Put to partition 0.
cache.put(i * PARTS, i * PARTS);
// Put to partition 1.
cache.put(i * PARTS + 1, i * PARTS + 1);
// Put to another cache.
cache2.put(i, i);
dbMgr.waitForCheckpoint("test");
}
for (int i = 0; i < entries; i++) {
assertEquals((Integer) (i * PARTS), cache.get(i * PARTS));
assertEquals((Integer) (i * PARTS + 1), cache.get(i * PARTS + 1));
assertEquals((Integer) (i), cache2.get(i));
}
CacheGroupContext grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
IgniteCacheOffheapManager offh = grp.offheap();
AffinityTopologyVersion topVer = grp.affinity().lastVersion();
IgniteDhtDemandedPartitionsMap map;
for (int i = 0; i < entries; i++) {
map = new IgniteDhtDemandedPartitionsMap();
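// Demand partition 0 historically: updates from counter i up to 'entries'; the last argument is assumed to be the total partition count (PARTS).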
map.addHistorical(0, i, entries, PARTS);
WALPointer ptr = reserveWalPointerForIterator(grp.shared());
try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
assertNotNull(it);
assertTrue("Not historical for iteration: " + i, it.historical(0));
for (int j = i; j < entries; j++) {
assertTrue("i=" + i + ", j=" + j, it.hasNextX());
CacheDataRow row = it.next();
assertEquals(j * PARTS, (int) row.key().value(grp.cacheObjectContext(), false));
assertEquals(j * PARTS, (int) row.value().value(grp.cacheObjectContext(), false));
}
assertFalse(it.hasNext());
} finally {
releaseWalPointerForIterator(grp.shared(), ptr);
}
map = new IgniteDhtDemandedPartitionsMap();
map.addHistorical(1, i, entries, PARTS);
ptr = reserveWalPointerForIterator(grp.shared());
try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
assertNotNull(it);
assertTrue("Not historical for iteration: " + i, it.historical(1));
for (int j = i; j < entries; j++) {
assertTrue(it.hasNextX());
CacheDataRow row = it.next();
assertEquals(j * PARTS + 1, (int) row.key().value(grp.cacheObjectContext(), false));
assertEquals(j * PARTS + 1, (int) row.value().value(grp.cacheObjectContext(), false));
}
assertFalse(it.hasNext());
} finally {
releaseWalPointerForIterator(grp.shared(), ptr);
}
}
stopAllGrids();
// Check that iterator is valid after restart.
ignite = startGrid();
ignite.cluster().active(true);
grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
offh = grp.offheap();
topVer = grp.affinity().lastVersion();
for (int i = 0; i < entries; i++) {
long start = System.currentTimeMillis();
map = new IgniteDhtDemandedPartitionsMap();
map.addHistorical(0, i, entries, PARTS);
WALPointer ptr = reserveWalPointerForIterator(grp.shared());
try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
long end = System.currentTimeMillis();
info("Time to get iterator: " + (end - start));
assertTrue("Not historical for iteration: " + i, it.historical(0));
assertNotNull(it);
start = System.currentTimeMillis();
for (int j = i; j < entries; j++) {
assertTrue("i=" + i + ", j=" + j, it.hasNextX());
CacheDataRow row = it.next();
assertEquals(j * PARTS, (int) row.key().value(grp.cacheObjectContext(), false));
assertEquals(j * PARTS, (int) row.value().value(grp.cacheObjectContext(), false));
}
end = System.currentTimeMillis();
info("Time to iterate: " + (end - start));
assertFalse(it.hasNext());
} finally {
releaseWalPointerForIterator(grp.shared(), ptr);
}
map = new IgniteDhtDemandedPartitionsMap();
map.addHistorical(1, i, entries, PARTS);
ptr = reserveWalPointerForIterator(grp.shared());
try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
assertNotNull(it);
assertTrue("Not historical for iteration: " + i, it.historical(1));
for (int j = i; j < entries; j++) {
assertTrue(it.hasNextX());
CacheDataRow row = it.next();
assertEquals(j * PARTS + 1, (int) row.key().value(grp.cacheObjectContext(), false));
assertEquals(j * PARTS + 1, (int) row.value().value(grp.cacheObjectContext(), false));
}
assertFalse(it.hasNext());
} finally {
releaseWalPointerForIterator(grp.shared(), ptr);
}
}
} finally {
stopAllGrids();
System.clearProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD);
}
}
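Both tests assume a fixture whose getConfiguration() enables Ignite native persistence (there is no WAL history to rebalance from without it) and registers the caches. A hedged sketch of the relevant parts; CACHE_NAME, PARTS and extraCcfg come from the tests above, the rest is an assumption:

@Override protected IgniteConfiguration getConfiguration(String igniteInstanceName) throws Exception {
    IgniteConfiguration cfg = super.getConfiguration(igniteInstanceName);

    // Persistent data region: required for WAL-based historical rebalancing.
    cfg.setDataStorageConfiguration(new DataStorageConfiguration()
        .setDefaultDataRegionConfiguration(new DataRegionConfiguration().setPersistenceEnabled(true)));

    CacheConfiguration<Integer, Integer> ccfg = new CacheConfiguration<>(CACHE_NAME);

    ccfg.setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL);
    ccfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));

    // The second test adds an extra cache configuration via the 'extraCcfg' field.
    cfg.setCacheConfiguration(extraCcfg != null ? new CacheConfiguration[] {ccfg, extraCcfg} : new CacheConfiguration[] {ccfg});

    return cfg;
}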
use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
the class GridCacheMapEntry method innerUpdateLocal.
/**
* {@inheritDoc}
*/
@SuppressWarnings("unchecked")
@Override
public GridTuple3<Boolean, Object, EntryProcessorResult<Object>> innerUpdateLocal(GridCacheVersion ver, GridCacheOperation op, @Nullable Object writeObj, @Nullable Object[] invokeArgs, boolean writeThrough, boolean readThrough, boolean retval, boolean keepBinary, @Nullable ExpiryPolicy expiryPlc, boolean evt, boolean metrics, @Nullable CacheEntryPredicate[] filter, boolean intercept, @Nullable UUID subjId, String taskName) throws IgniteCheckedException, GridCacheEntryRemovedException {
assert cctx.isLocal() && cctx.atomic();
CacheObject old;
boolean res = true;
IgniteBiTuple<Boolean, ?> interceptorRes = null;
EntryProcessorResult<Object> invokeRes = null;
lockEntry();
try {
boolean internal = isInternal() || !context().userCache();
Map<UUID, CacheContinuousQueryListener> lsnrCol = cctx.continuousQueries().updateListeners(internal, false);
boolean needVal = retval || intercept || op == GridCacheOperation.TRANSFORM || !F.isEmpty(filter) || lsnrCol != null;
checkObsolete();
CacheDataRow oldRow = null;
// Load and remove from swap if it is new.
if (isNew())
oldRow = unswap(null, false);
old = val;
boolean readFromStore = false;
Object old0 = null;
if (readThrough && needVal && old == null && (cctx.readThrough() && (op == GridCacheOperation.TRANSFORM || cctx.loadPreviousValue()))) {
old0 = readThrough(null, key, false, subjId, taskName);
old = cctx.toCacheObject(old0);
long ttl = CU.TTL_ETERNAL;
long expireTime = CU.EXPIRE_TIME_ETERNAL;
if (expiryPlc != null && old != null) {
ttl = CU.toTtl(expiryPlc.getExpiryForCreation());
if (ttl == CU.TTL_ZERO) {
ttl = CU.TTL_MINIMUM;
expireTime = CU.expireTimeInPast();
} else if (ttl == CU.TTL_NOT_CHANGED)
ttl = CU.TTL_ETERNAL;
else
expireTime = CU.toExpireTime(ttl);
}
// Detach value before index update.
old = cctx.kernalContext().cacheObjects().prepareForCache(old, cctx);
if (old != null)
storeValue(old, expireTime, ver, oldRow);
else
removeValue();
update(old, expireTime, ttl, ver, true);
}
// Apply metrics.
if (metrics && cctx.statisticsEnabled() && needVal) {
// PutIfAbsent methods mustn't update hit/miss statistics
if (op != GridCacheOperation.UPDATE || F.isEmpty(filter) || !cctx.putIfAbsentFilter(filter))
cctx.cache().metrics0().onRead(old != null);
}
// Check filter inside of synchronization.
if (!F.isEmpty(filter)) {
boolean pass = cctx.isAllLocked(this, filter);
if (!pass) {
if (expiryPlc != null && !readFromStore && !cctx.putIfAbsentFilter(filter) && hasValueUnlocked())
updateTtl(expiryPlc);
Object val = retval ? cctx.cacheObjectContext().unwrapBinaryIfNeeded(CU.value(old, cctx, false), keepBinary, false) : null;
return new T3<>(false, val, null);
}
}
String transformCloClsName = null;
CacheObject updated;
Object key0 = null;
Object updated0 = null;
// Calculate new value.
if (op == GridCacheOperation.TRANSFORM) {
transformCloClsName = EntryProcessorResourceInjectorProxy.unwrap(writeObj).getClass().getName();
EntryProcessor<Object, Object, ?> entryProcessor = (EntryProcessor<Object, Object, ?>) writeObj;
assert entryProcessor != null;
CacheInvokeEntry<Object, Object> entry = new CacheInvokeEntry<>(key, old, version(), keepBinary, this);
try {
Object computed = entryProcessor.process(entry, invokeArgs);
if (entry.modified()) {
updated0 = cctx.unwrapTemporary(entry.getValue());
updated = cctx.toCacheObject(updated0);
// No validation for the remove case.
if (updated != null)
cctx.validateKeyAndValue(key, updated);
} else
updated = old;
key0 = entry.key();
invokeRes = computed != null ? CacheInvokeResult.fromResult(cctx.unwrapTemporary(computed)) : null;
} catch (Exception e) {
updated = old;
invokeRes = CacheInvokeResult.fromError(e);
}
if (!entry.modified()) {
if (expiryPlc != null && !readFromStore && hasValueUnlocked())
updateTtl(expiryPlc);
return new GridTuple3<>(false, null, invokeRes);
}
} else
updated = (CacheObject) writeObj;
op = updated == null ? GridCacheOperation.DELETE : GridCacheOperation.UPDATE;
if (intercept) {
CacheLazyEntry e;
if (op == GridCacheOperation.UPDATE) {
updated0 = value(updated0, updated, keepBinary, false);
e = new CacheLazyEntry(cctx, key, key0, old, old0, keepBinary);
Object interceptorVal = cctx.config().getInterceptor().onBeforePut(e, updated0);
if (interceptorVal == null)
return new GridTuple3<>(false, cctx.unwrapTemporary(value(old0, old, keepBinary, false)), invokeRes);
else {
updated0 = cctx.unwrapTemporary(interceptorVal);
updated = cctx.toCacheObject(updated0);
}
} else {
e = new CacheLazyEntry(cctx, key, key0, old, old0, keepBinary);
interceptorRes = cctx.config().getInterceptor().onBeforeRemove(e);
if (cctx.cancelRemove(interceptorRes))
return new GridTuple3<>(false, cctx.unwrapTemporary(interceptorRes.get2()), invokeRes);
}
key0 = e.key();
old0 = e.value();
}
boolean hadVal = hasValueUnlocked();
long ttl = CU.TTL_ETERNAL;
long expireTime = CU.EXPIRE_TIME_ETERNAL;
if (op == GridCacheOperation.UPDATE) {
if (expiryPlc != null) {
ttl = CU.toTtl(hadVal ? expiryPlc.getExpiryForUpdate() : expiryPlc.getExpiryForCreation());
if (ttl == CU.TTL_NOT_CHANGED) {
ttl = ttlExtras();
expireTime = expireTimeExtras();
} else if (ttl != CU.TTL_ZERO)
expireTime = CU.toExpireTime(ttl);
} else {
ttl = ttlExtras();
expireTime = expireTimeExtras();
}
}
if (ttl == CU.TTL_ZERO)
op = GridCacheOperation.DELETE;
// Try write-through.
if (op == GridCacheOperation.UPDATE) {
// Detach value before index update.
updated = cctx.kernalContext().cacheObjects().prepareForCache(updated, cctx);
if (writeThrough)
// Must persist inside synchronization in non-tx mode.
cctx.store().put(null, key, updated, ver);
storeValue(updated, expireTime, ver, oldRow);
assert ttl != CU.TTL_ZERO;
update(updated, expireTime, ttl, ver, true);
if (evt) {
CacheObject evtOld = null;
if (transformCloClsName != null && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ)) {
evtOld = cctx.unwrapTemporary(old);
cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion) null, EVT_CACHE_OBJECT_READ, evtOld, evtOld != null || hadVal, evtOld, evtOld != null || hadVal, subjId, transformCloClsName, taskName, keepBinary);
}
if (cctx.events().isRecordable(EVT_CACHE_OBJECT_PUT)) {
if (evtOld == null)
evtOld = cctx.unwrapTemporary(old);
cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion) null, EVT_CACHE_OBJECT_PUT, updated, updated != null, evtOld, evtOld != null || hadVal, subjId, null, taskName, keepBinary);
}
}
} else {
if (writeThrough)
// Must persist inside synchronization in non-tx mode.
cctx.store().remove(null, key);
removeValue();
update(null, CU.TTL_ETERNAL, CU.EXPIRE_TIME_ETERNAL, ver, true);
if (evt) {
CacheObject evtOld = null;
if (transformCloClsName != null && cctx.events().isRecordable(EVT_CACHE_OBJECT_READ))
cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion) null, EVT_CACHE_OBJECT_READ, evtOld, evtOld != null || hadVal, evtOld, evtOld != null || hadVal, subjId, transformCloClsName, taskName, keepBinary);
if (cctx.events().isRecordable(EVT_CACHE_OBJECT_REMOVED)) {
if (evtOld == null)
evtOld = cctx.unwrapTemporary(old);
cctx.events().addEvent(partition(), key, cctx.localNodeId(), null, (GridCacheVersion) null, EVT_CACHE_OBJECT_REMOVED, null, false, evtOld, evtOld != null || hadVal, subjId, null, taskName, keepBinary);
}
}
res = hadVal;
}
if (res)
updateMetrics(op, metrics);
if (lsnrCol != null) {
long updateCntr = nextPartitionCounter(AffinityTopologyVersion.NONE, true, null);
cctx.continuousQueries().onEntryUpdated(lsnrCol, key, val, old, internal, partition(), true, false, updateCntr, null, AffinityTopologyVersion.NONE);
onUpdateFinished(updateCntr);
}
cctx.dataStructures().onEntryUpdated(key, op == GridCacheOperation.DELETE, keepBinary);
if (intercept) {
if (op == GridCacheOperation.UPDATE)
cctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(cctx, key, key0, updated, updated0, keepBinary, 0L));
else
cctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(cctx, key, key0, old, old0, keepBinary, 0L));
}
} finally {
unlockEntry();
}
return new GridTuple3<>(res, cctx.unwrapTemporary(interceptorRes != null ? interceptorRes.get2() : cctx.cacheObjectContext().unwrapBinaryIfNeeded(old, keepBinary, false)), invokeRes);
}
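From user code, the TRANSFORM branch above is reached through a plain invoke() on a LOCAL ATOMIC cache (the method asserts exactly that combination at its start). A hedged usage sketch; the cache name and values are illustrative only:

IgniteCache<Integer, Integer> cache = ignite.getOrCreateCache(
    new CacheConfiguration<Integer, Integer>("localAtomicCache")
        .setCacheMode(CacheMode.LOCAL)
        .setAtomicityMode(CacheAtomicityMode.ATOMIC));

// The entry processor arrives in innerUpdateLocal as 'writeObj' with op == TRANSFORM.
Integer prev = cache.invoke(1, (entry, args) -> {
    Integer old = entry.getValue();            // needVal: the old value is loaded (or read through)
    entry.setValue(old == null ? 1 : old + 1); // marks the entry as modified
    return old;                                // surfaced to the caller via the invoke result
});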
use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
the class IgniteCacheOffheapManagerImpl method cacheEntriesIterator.
/**
* @param cctx Cache context.
* @param primary {@code True} to include primary entries.
* @param backup {@code True} to include backup entries.
* @param topVer Topology version to use.
* @param keepBinary Keep-binary flag.
* @return Entries iterator.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings("unchecked")
@Override
public <K, V> GridCloseableIterator<Cache.Entry<K, V>> cacheEntriesIterator(final GridCacheContext cctx, final boolean primary, final boolean backup, final AffinityTopologyVersion topVer, final boolean keepBinary) throws IgniteCheckedException {
final Iterator<CacheDataRow> it = cacheIterator(cctx.cacheId(), primary, backup, topVer);
return new GridCloseableIteratorAdapter<Cache.Entry<K, V>>() {
/**
*/
private CacheEntryImplEx next;
@Override
protected Cache.Entry<K, V> onNext() {
CacheEntryImplEx ret = next;
next = null;
return ret;
}
@Override
protected boolean onHasNext() {
if (next != null)
return true;
CacheDataRow nextRow = null;
if (it.hasNext())
nextRow = it.next();
if (nextRow != null) {
KeyCacheObject key = nextRow.key();
CacheObject val = nextRow.value();
Object key0 = cctx.unwrapBinaryIfNeeded(key, keepBinary, false);
Object val0 = cctx.unwrapBinaryIfNeeded(val, keepBinary, false);
next = new CacheEntryImplEx(key0, val0, nextRow.version());
return true;
}
return false;
}
};
}
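A hedged usage sketch of this method from a caller that declares throws IgniteCheckedException; it relies on internal API, so the cachex() accessor used to reach the GridCacheContext is an assumption:

GridCacheContext cctx = ((IgniteEx)ignite).cachex(CACHE_NAME).context();

try (GridCloseableIterator<Cache.Entry<Integer, Integer>> it = cctx.offheap().cacheEntriesIterator(
    cctx, true /* primary */, false /* backup */, cctx.affinity().affinityTopologyVersion(), false /* keepBinary */)) {
    while (it.hasNext()) {
        Cache.Entry<Integer, Integer> e = it.next();

        System.out.println(e.getKey() + " -> " + e.getValue());
    }
}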
use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
the class IgniteCacheOffheapManagerImpl method iterator.
/**
* @param cacheId Cache ID.
* @param dataIt Data store iterator.
* @return Rows iterator.
*/
private GridCloseableIterator<CacheDataRow> iterator(final int cacheId, final Iterator<CacheDataStore> dataIt) {
return new GridCloseableIteratorAdapter<CacheDataRow>() {
/**
*/
private GridCursor<? extends CacheDataRow> cur;
/**
*/
private int curPart;
/**
*/
private CacheDataRow next;
@Override
protected CacheDataRow onNext() {
CacheDataRow res = next;
next = null;
return res;
}
@Override
protected boolean onHasNext() throws IgniteCheckedException {
if (next != null)
return true;
while (true) {
if (cur == null) {
if (dataIt.hasNext()) {
CacheDataStore ds = dataIt.next();
curPart = ds.partId();
cur = cacheId == CU.UNDEFINED_CACHE_ID ? ds.cursor() : ds.cursor(cacheId);
} else
break;
}
if (cur.next()) {
next = cur.get();
next.key().partition(curPart);
break;
} else
cur = null;
}
return next != null;
}
};
}
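The composed iterator is lazy: it advances one partition data store at a time through its GridCursor and stamps each returned row's key with the owning partition. A hedged consumption sketch from inside the same class (process() is a placeholder, not a real method):

try (GridCloseableIterator<CacheDataRow> rows = iterator(CU.cacheId("myCache"), cacheDataStores().iterator())) {
    while (rows.hasNextX()) {
        CacheDataRow row = rows.nextX();

        // The partition was assigned to the key by the iterator above.
        process(row.key().partition(), row.key(), row.value());
    }
}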