Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class IgniteTxConcurrentRemoveObjectsTest, method checkTxLeavesObjectsInLocalPartition.
/**
 * Too many deletes in a single transaction may overflow {@link GridDhtLocalPartition#rmvQueue}, in which case
 * entries are deleted synchronously in {@link GridDhtLocalPartition#onDeferredDelete(int, KeyCacheObject, GridCacheVersion)}.
 * This should not corrupt the internal map state in {@link GridDhtLocalPartition}.
 *
 * @param ccfg Cache configuration.
 * @param optimistic Transaction concurrency.
 * @param isolation Transaction isolation.
 * @throws Exception If failed.
 */
public void checkTxLeavesObjectsInLocalPartition(CacheConfiguration<Integer, String> ccfg, TransactionConcurrency optimistic, TransactionIsolation isolation) throws Exception {
IgniteEx igniteEx = grid(0);
igniteEx.getOrCreateCache(ccfg);
try (IgniteDataStreamer<Integer, String> dataStreamer = igniteEx.dataStreamer(DEFAULT_CACHE_NAME)) {
for (int i = 0; i < CACHE_ENTRIES_COUNT; i++) dataStreamer.addData(i, UUID.randomUUID().toString());
}
IgniteEx client = startClientGrid(getConfiguration().setIgniteInstanceName(UUID.randomUUID().toString()));
awaitPartitionMapExchange();
assertEquals(CACHE_ENTRIES_COUNT, client.getOrCreateCache(DEFAULT_CACHE_NAME).size());
try (Transaction tx = client.transactions().txStart(optimistic, isolation)) {
IgniteCache<Integer, String> cache = client.getOrCreateCache(cacheConfiguration());
for (int v = 0; v < CACHE_ENTRIES_COUNT; v++) {
cache.get(v);
cache.remove(v);
}
tx.commit();
}
GridTestUtils.waitForCondition(
    () -> igniteEx.context().cache().cacheGroups().stream()
        .filter(CacheGroupContext::userCache)
        .flatMap(cgctx -> cgctx.topology().localPartitions().stream())
        .mapToInt(GridDhtLocalPartition::internalSize)
        .max().orElse(-1) == 0,
    500L);
}
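A hedged sketch of how this checker might be driven from a concrete test, assuming the class extends Ignite's common test base and that cacheConfiguration() (referenced inside the checker above) builds a fresh CacheConfiguration; the test method name below is illustrative, not taken from the source:
/** Hypothetical driver: runs the checker for every concurrency/isolation pair. */
@Test
public void testTxLeavesNoObjectsInLocalPartition() throws Exception {
    for (TransactionConcurrency concurrency : TransactionConcurrency.values()) {
        for (TransactionIsolation isolation : TransactionIsolation.values())
            checkTxLeavesObjectsInLocalPartition(cacheConfiguration(), concurrency, isolation);
    }
}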
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method checkDataWalEntries.
/**
 * @param mvcc {@code True} to log MVCC data entries, {@code false} for plain data entries.
 * @throws Exception If failed.
 */
private void checkDataWalEntries(boolean mvcc) throws Exception {
IgniteEx ig = startGrid(0);
ig.cluster().active(true);
GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
IgniteWriteAheadLogManager wal = sharedCtx.wal();
assertTrue(wal.isAlwaysWriteFullPages());
db.enableCheckpoints(false).get();
final int cnt = 10;
List<DataEntry> entries = new ArrayList<>(cnt);
for (int i = 0; i < cnt; i++) {
GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;
KeyCacheObject key = cctx.toCacheKeyObject(i);
CacheObject val = null;
if (op != GridCacheOperation.DELETE)
val = cctx.toCacheObject("value-" + i);
entries.add(mvcc
    ? new MvccDataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
        cctx.affinity().partition(i), i, new MvccVersionImpl(1000L, 10L, i + 1))
    : new DataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
        cctx.affinity().partition(i), i, DataEntry.EMPTY_FLAGS));
}
UUID cpId = UUID.randomUUID();
WALPointer start = wal.log(new CheckpointRecord(cpId, null));
wal.flush(start, false);
for (DataEntry entry : entries) wal.log(mvcc ? new MvccDataRecord((MvccDataEntry) entry) : new DataRecord(entry));
// Data will not be written to the page store.
stopAllGrids();
ig = startGrid(0);
ig.cluster().active(true);
sharedCtx = ig.context().cache().context();
cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();
db = (GridCacheDatabaseSharedManager) sharedCtx.database();
wal = sharedCtx.wal();
db.enableCheckpoints(false).get();
try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();
assert cpRecordTup.get2() instanceof CheckpointRecord;
assertEquals(start, cpRecordTup.get1());
CheckpointRecord cpRec = (CheckpointRecord) cpRecordTup.get2();
assertEquals(cpId, cpRec.checkpointId());
assertNull(cpRec.checkpointMark());
assertFalse(cpRec.end());
int idx = 0;
CacheObjectContext coctx = cctx.cacheObjectContext();
while (idx < entries.size()) {
IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();
if (!mvcc)
assert dataRecTup.get2() instanceof DataRecord;
else
assert dataRecTup.get2() instanceof MvccDataRecord;
DataRecord dataRec = (DataRecord) dataRecTup.get2();
DataEntry entry = entries.get(idx);
assertEquals(1, dataRec.entryCount());
DataEntry readEntry = dataRec.get(0);
assertEquals(entry.cacheId(), readEntry.cacheId());
assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
assertEquals(entry.op(), readEntry.op());
if (entry.op() == GridCacheOperation.UPDATE)
assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
else
assertNull(entry.value());
assertEquals(entry.writeVersion(), readEntry.writeVersion());
assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
assertEquals(entry.partitionCounter(), readEntry.partitionCounter());
if (mvcc) {
assert entry instanceof MvccDataEntry;
assert readEntry instanceof MvccDataEntry;
assertEquals(((MvccDataEntry) entry).mvccVer(), ((MvccDataEntry) readEntry).mvccVer());
}
idx++;
}
}
}
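For reference, the assertions above rely on the round-trip between a user key and its KeyCacheObject form. A minimal sketch, assuming cctx and coctx come from a started cache exactly as in the test:
// Wrap a user key into the KeyCacheObject form stored in WAL data entries...
KeyCacheObject key = cctx.toCacheKeyObject(42);
// ...and unwrap it again; 'true' requests a deserialized copy of the binary form.
Integer userKey = key.value(cctx.cacheObjectContext(), true);
assertEquals(Integer.valueOf(42), userKey);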
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class H2PkHashIndex, method find.
/**
* {@inheritDoc}
*/
@Override
public Cursor find(Session ses, final SearchRow lower, final SearchRow upper) {
IndexingQueryCacheFilter filter = null;
MvccSnapshot mvccSnapshot = null;
QueryContext qctx = H2Utils.context(ses);
int seg = 0;
if (qctx != null) {
IndexingQueryFilter f = qctx.filter();
filter = f != null ? f.forCache(getTable().cacheName()) : null;
mvccSnapshot = qctx.mvccSnapshot();
seg = qctx.segment();
}
assert !cctx.mvccEnabled() || mvccSnapshot != null;
KeyCacheObject lowerObj = lower != null ? cctx.toCacheKeyObject(lower.getValue(0).getObject()) : null;
KeyCacheObject upperObj = upper != null ? cctx.toCacheKeyObject(upper.getValue(0).getObject()) : null;
try {
CacheDataRowStore.setSkipVersion(true);
Collection<GridCursor<? extends CacheDataRow>> cursors = new ArrayList<>();
for (IgniteCacheOffheapManager.CacheDataStore store : cctx.offheap().cacheDataStores()) {
int part = store.partId();
if (segmentForPartition(part) != seg)
continue;
if (filter == null || filter.applyPartition(part))
cursors.add(store.cursor(cctx.cacheId(), lowerObj, upperObj, null, mvccSnapshot));
}
return new H2PkHashIndexCursor(cursors.iterator());
} catch (IgniteCheckedException e) {
throw DbException.convert(e);
} finally {
CacheDataRowStore.setSkipVersion(false);
}
}
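The segmentForPartition(...) call above decides whether this index segment owns a given partition. A hedged sketch of that mapping, mirroring the modulo scheme Ignite's H2 indexes use; the standalone signature here is illustrative:
// Illustrative partition-to-segment mapping: with a single segment everything
// maps to segment 0, otherwise partitions are distributed by modulo.
static int segmentForPartition(int part, int segmentsCnt) {
    return segmentsCnt < 2 ? 0 : part % segmentsCnt;
}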
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class CacheMvccTransactionsTest, method testInternalApi.
/**
 * @throws Exception If failed.
 */
@Test
public void testInternalApi() throws Exception {
Ignite node = startGrid(0);
IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));
GridCacheContext cctx = ((IgniteKernal) node).context().cache().context().cacheContext(CU.cacheId(cache.getName()));
MvccProcessorImpl crd = mvccProcessor(node);
// Start query to prevent cleanup.
MvccSnapshotFuture fut = new MvccSnapshotFuture();
crd.requestReadSnapshotAsync(crd.currentCoordinator(), fut);
fut.get();
final int KEYS = 1000;
for (int i = 0; i < 10; i++) {
for (int k = 0; k < KEYS; k++) {
final Integer key = k;
try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
cache.put(key, i);
tx.commit();
}
}
}
for (int k = 0; k < KEYS; k++) {
final Integer key = k;
KeyCacheObject key0 = cctx.toCacheKeyObject(key);
List<IgniteBiTuple<Object, MvccVersion>> vers = cctx.offheap().mvccAllVersions(cctx, key0);
assertEquals(10, vers.size());
CacheDataRow row = cctx.offheap().read(cctx, key0);
Object val = ((CacheObject) vers.get(0).get1()).value(cctx.cacheObjectContext(), false);
checkRow(cctx, row, key0, val);
for (IgniteBiTuple<Object, MvccVersion> ver : vers) {
MvccVersion cntr = ver.get2();
MvccSnapshot readVer = new MvccSnapshotWithoutTxs(cntr.coordinatorVersion(), cntr.counter(), MvccUtils.MVCC_READ_OP_CNTR, 0);
row = cctx.offheap().mvccRead(cctx, key0, readVer);
Object verVal = ((CacheObject) ver.get1()).value(cctx.cacheObjectContext(), false);
checkRow(cctx, row, key0, verVal);
}
checkRow(cctx, cctx.offheap().mvccRead(cctx, key0,
    version(vers.get(0).get2().coordinatorVersion() + 1, 1)), key0, val);
checkRow(cctx, cctx.offheap().mvccRead(cctx, key0,
    version(vers.get(0).get2().coordinatorVersion(), vers.get(0).get2().counter() + 1)), key0, val);
MvccSnapshotResponse ver = version(vers.get(0).get2().coordinatorVersion(), 100000);
for (int v = 0; v < vers.size(); v++) {
MvccVersion cntr = vers.get(v).get2();
ver.addTx(cntr.counter());
row = cctx.offheap().mvccRead(cctx, key0, ver);
if (v == vers.size() - 1)
assertNull(row);
else {
Object nextVal = ((CacheObject) vers.get(v + 1).get1()).value(cctx.cacheObjectContext(), false);
checkRow(cctx, row, key0, nextVal);
}
}
}
KeyCacheObject key = cctx.toCacheKeyObject(KEYS);
cache.put(key, 0);
cache.remove(key);
cctx.offheap().mvccRemoveAll((GridCacheMapEntry) cctx.cache().entryEx(key));
crd.ackQueryDone(fut.get(), MVCC_TRACKER_ID_NA);
}
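The checkRow(...) helper is not shown in this snippet; a hedged reconstruction of what it presumably asserts, inferred only from how it is called above:
/** Hypothetical reconstruction: verifies a data row matches the expected key and value. */
private void checkRow(GridCacheContext cctx, CacheDataRow row, KeyCacheObject expKey, Object expVal) {
    assertNotNull(row);
    assertEquals(expKey, row.key());
    assertEquals(expVal, row.value().value(cctx.cacheObjectContext(), false));
}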
Use of org.apache.ignite.internal.processors.cache.KeyCacheObject in project ignite by apache.
The class IgniteWalReaderTest, method iterateAndCount.
/**
 * Iterates over the records and closes the iterator.
 *
 * @param walIter Iterator to count; it will be closed.
 * @return Count of records.
 * @throws IgniteCheckedException If iteration failed.
 */
private int iterateAndCount(WALIterator walIter) throws IgniteCheckedException {
int cnt = 0;
try (WALIterator it = walIter) {
while (it.hasNextX()) {
IgniteBiTuple<WALPointer, WALRecord> tup = it.nextX();
WALRecord walRecord = tup.get2();
if (walRecord.type() == DATA_RECORD_V2 || walRecord.type() == MVCC_DATA_RECORD) {
DataRecord record = (DataRecord) walRecord;
for (int i = 0; i < record.entryCount(); i++) {
DataEntry entry = record.get(i);
KeyCacheObject key = entry.key();
CacheObject val = entry.value();
if (walRecord.type() == DATA_RECORD_V2) {
assertEquals(primary, (entry.flags() & DataEntry.PRIMARY_FLAG) != 0);
assertEquals(rebalance, (entry.flags() & DataEntry.PRELOAD_FLAG) != 0);
}
if (DUMP_RECORDS)
log.info("Op: " + entry.op() + ", Key: " + key + ", Value: " + val);
}
}
if (DUMP_RECORDS)
log.info("Record: " + walRecord);
cnt++;
}
}
return cnt;
}
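A hedged usage sketch: feeding iterateAndCount(...) an iterator built by Ignite's standalone WAL reader API (IgniteWalIteratorFactory). The walDirPath variable and the surrounding log field are assumptions, as is the enclosing method being declared to throw IgniteCheckedException:
// Build a standalone iterator over WAL segment files and count the records;
// iterateAndCount(...) closes the iterator itself via try-with-resources.
IgniteWalIteratorFactory factory = new IgniteWalIteratorFactory(log);
int cnt = iterateAndCount(factory.iterator(
    new IgniteWalIteratorFactory.IteratorParametersBuilder().filesOrDirs(walDirPath)));
log.info("Total WAL records: " + cnt);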