Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
Class H2PkHashIndex, method find().
/** {@inheritDoc} */
@Override public Cursor find(Session ses, final SearchRow lower, final SearchRow upper) {
    IndexingQueryCacheFilter filter = null;
    MvccSnapshot mvccSnapshot = null;

    QueryContext qctx = H2Utils.context(ses);

    int seg = 0;

    if (qctx != null) {
        IndexingQueryFilter f = qctx.filter();

        filter = f != null ? f.forCache(getTable().cacheName()) : null;
        mvccSnapshot = qctx.mvccSnapshot();
        seg = qctx.segment();
    }

    assert !cctx.mvccEnabled() || mvccSnapshot != null;

    KeyCacheObject lowerObj = lower != null ? cctx.toCacheKeyObject(lower.getValue(0).getObject()) : null;
    KeyCacheObject upperObj = upper != null ? cctx.toCacheKeyObject(upper.getValue(0).getObject()) : null;

    try {
        CacheDataRowStore.setSkipVersion(true);

        Collection<GridCursor<? extends CacheDataRow>> cursors = new ArrayList<>();

        for (IgniteCacheOffheapManager.CacheDataStore store : cctx.offheap().cacheDataStores()) {
            int part = store.partId();

            if (segmentForPartition(part) != seg)
                continue;

            if (filter == null || filter.applyPartition(part))
                cursors.add(store.cursor(cctx.cacheId(), lowerObj, upperObj, null, mvccSnapshot));
        }

        return new H2PkHashIndexCursor(cursors.iterator());
    }
    catch (IgniteCheckedException e) {
        throw DbException.convert(e);
    }
    finally {
        CacheDataRowStore.setSkipVersion(false);
    }
}
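The per-partition cursors collected above follow the generic GridCursor contract: next() advances the cursor and get() returns the current CacheDataRow, both potentially throwing IgniteCheckedException. A minimal, hypothetical sketch of that consumption pattern (the drain() helper is not part of Ignite; it only illustrates how a wrapper such as H2PkHashIndexCursor walks the cursors):

// Hypothetical helper: collects rows from a set of per-partition cursors into one list.
static List<CacheDataRow> drain(Iterable<GridCursor<? extends CacheDataRow>> cursors)
    throws IgniteCheckedException {
    List<CacheDataRow> rows = new ArrayList<>();

    for (GridCursor<? extends CacheDataRow> cur : cursors) {
        while (cur.next())
            rows.add(cur.get());
    }

    return rows;
}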
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
Class H2PkHashIndex, method totalRowCount().
/** {@inheritDoc} */
@Override public long totalRowCount(IndexingQueryCacheFilter partsFilter) {
    CacheDataRowStore.setSkipVersion(true);

    try {
        Collection<GridCursor<? extends CacheDataRow>> cursors = new ArrayList<>();

        for (IgniteCacheOffheapManager.CacheDataStore store : cctx.offheap().cacheDataStores()) {
            int part = store.partId();

            if (partsFilter == null || partsFilter.applyPartition(part))
                cursors.add(store.cursor(cctx.cacheId()));
        }

        Cursor pkHashCursor = new H2PkHashIndexCursor(cursors.iterator());

        long res = 0;

        while (pkHashCursor.next())
            res++;

        return res;
    }
    catch (IgniteCheckedException e) {
        throw U.convertException(e);
    }
    finally {
        CacheDataRowStore.setSkipVersion(false);
    }
}
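The same count could be taken without the H2 Cursor wrapper by walking the data-store cursors directly. A hedged sketch reusing only calls already shown above (cacheDataStores(), partId(), cursor(cacheId)); note that the real method additionally toggles CacheDataRowStore.setSkipVersion() around the iteration, which is omitted here:

// Hypothetical variant of totalRowCount(): counts rows straight from the data stores,
// honoring an optional partition filter, without going through H2PkHashIndexCursor.
static long countRows(GridCacheContext<?, ?> cctx, IndexingQueryCacheFilter filter)
    throws IgniteCheckedException {
    long cnt = 0;

    for (IgniteCacheOffheapManager.CacheDataStore store : cctx.offheap().cacheDataStores()) {
        if (filter != null && !filter.applyPartition(store.partId()))
            continue;

        GridCursor<? extends CacheDataRow> cur = store.cursor(cctx.cacheId());

        while (cur.next())
            cnt++;
    }

    return cnt;
}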
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
Class IgniteSnapshotManagerSelfTest, method testSnapshotIterator().
/**
 * @throws Exception If failed.
 */
@Test
public void testSnapshotIterator() throws Exception {
    int keys = 127;

    IgniteEx ignite = startGridsWithCache(2,
        dfltCacheCfg.setAffinity(new RendezvousAffinityFunction(false, 1)), keys);

    ignite.snapshot().createSnapshot(SNAPSHOT_NAME).get();

    int rows = 0;

    try (GridCloseableIterator<CacheDataRow> iter = snp(ignite).partitionRowIterator(
        ignite.context(),
        SNAPSHOT_NAME,
        ignite.context().pdsFolderResolver().resolveFolders().folderName(),
        dfltCacheCfg.getName(),
        0)
    ) {
        CacheObjectContext coctx = ignite.cachex(dfltCacheCfg.getName()).context().cacheObjectContext();

        while (iter.hasNext()) {
            CacheDataRow row = iter.next();

            // Invariant for this cache: the key always equals the value.
            assertEquals("Invalid key/value pair [key=" + row.key() + ", val=" + row.value() + ']',
                row.key().value(coctx, false, U.resolveClassLoader(ignite.configuration())),
                (Integer)row.value().value(coctx, false));

            rows++;
        }
    }

    assertEquals("Invalid number of rows: " + rows, keys, rows);
}
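The assertion above unwraps a CacheDataRow into user-level objects through the CacheObjectContext. A small hedged helper that captures just that step; unwrap() and its generic signature are assumptions for illustration, while the value(...) calls mirror the test:

// Hypothetical helper: deserializes the key and value of a CacheDataRow via the cache object context.
static <K, V> Map.Entry<K, V> unwrap(CacheDataRow row, CacheObjectContext coctx, ClassLoader ldr) {
    K key = row.key().value(coctx, false, ldr);
    V val = row.value().value(coctx, false);

    return new AbstractMap.SimpleEntry<>(key, val);
}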
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
Class CacheFreeListSelfTest, method checkInsertDeleteMultiThreaded().
/**
 * @param pageSize Page size.
 * @param batched Batch mode flag.
 * @throws Exception If failed.
 */
protected void checkInsertDeleteMultiThreaded(final int pageSize, final boolean batched) throws Exception {
    final FreeList<CacheDataRow> list = createFreeList(pageSize);

    Random rnd = new Random();

    final ConcurrentMap<Long, CacheDataRow> stored = new ConcurrentHashMap<>();

    for (int i = 0; i < 100; i++) {
        int keySize = rnd.nextInt(pageSize * 3 / 2) + 10;
        int valSize = rnd.nextInt(pageSize * 5 / 2) + 10;

        TestDataRow row = new TestDataRow(keySize, valSize);

        list.insertDataRow(row, IoStatisticsHolderNoOp.INSTANCE);

        assertTrue(row.link() != 0L);

        CacheDataRow old = stored.put(row.link(), row);

        assertNull(old);
    }

    final AtomicBoolean grow = new AtomicBoolean(true);

    GridTestUtils.runMultiThreaded(new Callable<Object>() {
        @Override public Object call() throws Exception {
            List<CacheDataRow> rows = new ArrayList<>(BATCH_SIZE);

            Random rnd = ThreadLocalRandom.current();

            for (int i = 0; i < 200_000; i++) {
                boolean grow0 = grow.get();

                if (grow0) {
                    if (stored.size() > 20_000) {
                        if (grow.compareAndSet(true, false))
                            info("Shrink... [" + stored.size() + ']');

                        grow0 = false;
                    }
                }
                else {
                    if (stored.size() < 1_000) {
                        if (grow.compareAndSet(false, true))
                            info("Grow... [" + stored.size() + ']');

                        grow0 = true;
                    }
                }

                boolean insert = rnd.nextInt(100) < 70 == grow0;

                if (insert) {
                    int keySize = rnd.nextInt(pageSize * 3 / 2) + 10;
                    int valSize = rnd.nextInt(pageSize * 3 / 2) + 10;

                    TestDataRow row = new TestDataRow(keySize, valSize);

                    if (batched) {
                        rows.add(row);

                        if (rows.size() == BATCH_SIZE) {
                            list.insertDataRows(rows, IoStatisticsHolderNoOp.INSTANCE);

                            for (CacheDataRow row0 : rows) {
                                assertTrue(row0.link() != 0L);

                                CacheDataRow old = stored.put(row0.link(), row0);

                                assertNull(old);
                            }

                            rows.clear();
                        }

                        continue;
                    }

                    list.insertDataRow(row, IoStatisticsHolderNoOp.INSTANCE);

                    assertTrue(row.link() != 0L);

                    CacheDataRow old = stored.put(row.link(), row);

                    assertNull(old);
                }
                else {
                    while (!stored.isEmpty()) {
                        Iterator<CacheDataRow> it = stored.values().iterator();

                        if (it.hasNext()) {
                            CacheDataRow row = it.next();

                            CacheDataRow rmvd = stored.remove(row.link());

                            if (rmvd != null) {
                                list.removeDataRowByLink(row.link(), IoStatisticsHolderNoOp.INSTANCE);

                                break;
                            }
                        }
                    }
                }
            }

            return null;
        }
    }, 8, "runner");
}
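Reduced to a single thread, the FreeList lifecycle exercised by the test boils down to: insert a row, which assigns it a non-zero link, then remove it by that link. A minimal sketch, assuming the test's own helpers (createFreeList(), TestDataRow) are available:

// Single-row sketch of the FreeList calls used by the multi-threaded test above.
FreeList<CacheDataRow> list = createFreeList(1024);

TestDataRow row = new TestDataRow(/*keySize*/ 64, /*valSize*/ 128);

// A successful insert stores the row off-heap and assigns it a non-zero link.
list.insertDataRow(row, IoStatisticsHolderNoOp.INSTANCE);

assertTrue(row.link() != 0L);

// The link is the handle used afterwards to remove the row.
list.removeDataRowByLink(row.link(), IoStatisticsHolderNoOp.INSTANCE);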
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
Class CacheMvccTransactionsTest, method testInternalApi().
/**
 * @throws Exception If failed.
 */
@Test
public void testInternalApi() throws Exception {
    Ignite node = startGrid(0);

    IgniteCache cache = node.createCache(cacheConfiguration(PARTITIONED, FULL_SYNC, 0, 1));

    GridCacheContext cctx = ((IgniteKernal)node).context().cache().context().cacheContext(CU.cacheId(cache.getName()));

    MvccProcessorImpl crd = mvccProcessor(node);

    // Start query to prevent cleanup.
    MvccSnapshotFuture fut = new MvccSnapshotFuture();

    crd.requestReadSnapshotAsync(crd.currentCoordinator(), fut);

    fut.get();

    final int KEYS = 1000;

    for (int i = 0; i < 10; i++) {
        for (int k = 0; k < KEYS; k++) {
            final Integer key = k;

            try (Transaction tx = node.transactions().txStart(PESSIMISTIC, REPEATABLE_READ)) {
                cache.put(key, i);

                tx.commit();
            }
        }
    }

    for (int k = 0; k < KEYS; k++) {
        final Integer key = k;

        KeyCacheObject key0 = cctx.toCacheKeyObject(key);

        List<IgniteBiTuple<Object, MvccVersion>> vers = cctx.offheap().mvccAllVersions(cctx, key0);

        assertEquals(10, vers.size());

        CacheDataRow row = cctx.offheap().read(cctx, key0);

        Object val = ((CacheObject)vers.get(0).get1()).value(cctx.cacheObjectContext(), false);

        checkRow(cctx, row, key0, val);

        for (IgniteBiTuple<Object, MvccVersion> ver : vers) {
            MvccVersion cntr = ver.get2();

            MvccSnapshot readVer = new MvccSnapshotWithoutTxs(cntr.coordinatorVersion(), cntr.counter(), MvccUtils.MVCC_READ_OP_CNTR, 0);

            row = cctx.offheap().mvccRead(cctx, key0, readVer);

            Object verVal = ((CacheObject)ver.get1()).value(cctx.cacheObjectContext(), false);

            checkRow(cctx, row, key0, verVal);
        }

        checkRow(cctx, cctx.offheap().mvccRead(cctx, key0, version(vers.get(0).get2().coordinatorVersion() + 1, 1)), key0, val);
        checkRow(cctx, cctx.offheap().mvccRead(cctx, key0, version(vers.get(0).get2().coordinatorVersion(), vers.get(0).get2().counter() + 1)), key0, val);

        MvccSnapshotResponse ver = version(vers.get(0).get2().coordinatorVersion(), 100000);

        for (int v = 0; v < vers.size(); v++) {
            MvccVersion cntr = vers.get(v).get2();

            ver.addTx(cntr.counter());

            row = cctx.offheap().mvccRead(cctx, key0, ver);

            if (v == vers.size() - 1)
                assertNull(row);
            else {
                Object nextVal = ((CacheObject)vers.get(v + 1).get1()).value(cctx.cacheObjectContext(), false);

                checkRow(cctx, row, key0, nextVal);
            }
        }
    }

    KeyCacheObject key = cctx.toCacheKeyObject(KEYS);

    cache.put(key, 0);
    cache.remove(key);

    cctx.offheap().mvccRemoveAll((GridCacheMapEntry)cctx.cache().entryEx(key));

    crd.ackQueryDone(fut.get(), MVCC_TRACKER_ID_NA);
}
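The offheap read paths exercised above can be condensed into a short hedged sketch: resolve the key to a KeyCacheObject, then either read the latest committed row, read the row visible to a particular MVCC snapshot, or list all stored versions (mvccSnapshot below stands for any snapshot obtained as in the test, e.g. a MvccSnapshotWithoutTxs):

// Condensed sketch of the offheap read calls exercised by the test.
KeyCacheObject key0 = cctx.toCacheKeyObject(42);

// Latest committed version of the row.
CacheDataRow latest = cctx.offheap().read(cctx, key0);

// Version of the row visible to the given MVCC snapshot.
CacheDataRow visible = cctx.offheap().mvccRead(cctx, key0, mvccSnapshot);

// All stored versions with their MVCC counters (the test above reads them newest-first).
List<IgniteBiTuple<Object, MvccVersion>> allVers = cctx.offheap().mvccAllVersions(cctx, key0);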