Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridLocalAtomicCache, method getAllInternal.
/**
* Entry point to all public API get methods.
*
* @param keys Keys to get.
* @param storeEnabled Store enabled flag.
* @param taskName Task name.
* @param deserializeBinary Deserialize binary flag.
* @param skipVals Skip value flag.
* @param needVer Need version.
* @return Key-value map.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings("ConstantConditions")
private Map<K, V> getAllInternal(@Nullable Collection<? extends K> keys, boolean storeEnabled, String taskName, boolean deserializeBinary, boolean skipVals, boolean needVer) throws IgniteCheckedException {
ctx.checkSecurity(SecurityPermission.CACHE_READ);
if (F.isEmpty(keys))
return Collections.emptyMap();
CacheOperationContext opCtx = ctx.operationContextPerCall();
Map<K, V> vals = U.newHashMap(keys.size());
warnIfUnordered(keys, BulkOperation.GET);
final IgniteCacheExpiryPolicy expiry = expiryPolicy(opCtx != null ? opCtx.expiry() : null);
boolean success = true;
boolean readNoEntry = ctx.readNoEntry(expiry, false);
final boolean evt = !skipVals;
ctx.shared().database().checkpointReadLock();
try {
for (K key : keys) {
if (key == null)
throw new NullPointerException("Null key.");
KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
boolean skipEntry = readNoEntry;
if (readNoEntry) {
CacheDataRow row = ctx.offheap().read(ctx, cacheKey);
if (row != null) {
long expireTime = row.expireTime();
if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
ctx.addResult(vals, cacheKey, row.value(), skipVals, false, deserializeBinary, true, null, row.version(), 0, 0, needVer, null);
if (ctx.statisticsEnabled() && !skipVals)
metrics0().onRead(true);
if (evt) {
ctx.events().readEvent(cacheKey, null, null, row.value(), taskName, !deserializeBinary);
}
} else
skipEntry = false;
} else
success = false;
}
if (!skipEntry) {
GridCacheEntryEx entry = null;
while (true) {
try {
entry = entryEx(cacheKey);
if (entry != null) {
CacheObject v;
if (needVer) {
EntryGetResult res = entry.innerGetVersioned(null, null, /*update-metrics*/false, /*event*/evt, null, taskName, expiry, !deserializeBinary, null);
if (res != null) {
ctx.addResult(vals, cacheKey, res, skipVals, false, deserializeBinary, true, needVer);
} else
success = false;
} else {
v = entry.innerGet(null, null, /*read-through*/false, /*update-metrics*/true, /*event*/evt, null, taskName, expiry, !deserializeBinary);
if (v != null) {
ctx.addResult(vals, cacheKey, v, skipVals, false, deserializeBinary, true, null, 0, 0, null);
} else
success = false;
}
}
break; // While.
} catch (GridCacheEntryRemovedException ignored) {
// No-op, retry.
} finally {
if (entry != null)
entry.touch();
}
if (!success && storeEnabled)
break;
}
}
if (!success) {
if (!storeEnabled && ctx.statisticsEnabled() && !skipVals)
metrics0().onRead(false);
}
}
} finally {
ctx.shared().database().checkpointReadUnlock();
}
if (success || !storeEnabled)
return vals;
return getAllAsync(keys, null, opCtx == null || !opCtx.skipStore(), false, taskName, deserializeBinary, opCtx != null && opCtx.recovery(), null, /*force primary*/false, expiry, skipVals, needVer).get();
}
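The read-no-entry branch above can be isolated into a small helper. This is a minimal sketch, assuming a GridCacheContext is at hand; the helper name readRowValue is introduced purely for illustration, while offheap().read(...), expireTime() and value() are the same calls the method uses.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.cache.CacheObject;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.util.typedef.internal.U;
import org.jetbrains.annotations.Nullable;

/** Hypothetical helper: reads a row directly from the offheap store and returns its value, or null if the row is missing or expired. */
@Nullable private static CacheObject readRowValue(GridCacheContext<?, ?> ctx, KeyCacheObject cacheKey) throws IgniteCheckedException {
    // Read the row without creating or locking a cache entry (read-no-entry fast path).
    CacheDataRow row = ctx.offheap().read(ctx, cacheKey);

    if (row == null)
        return null;

    long expireTime = row.expireTime();

    // A zero expire time means the row never expires.
    return expireTime == 0 || expireTime > U.currentTimeMillis() ? row.value() : null;
}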
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridIndexRebuildSelfTest, method checkDataState.
/**
* Check versions presence in index tree.
*
* @param srv Node.
* @param afterRebuild Whether index rebuild has occurred.
* @throws IgniteCheckedException If failed.
*/
@SuppressWarnings({ "ConstantConditions", "unchecked" })
protected void checkDataState(IgniteEx srv, boolean afterRebuild) throws IgniteCheckedException {
IgniteInternalCache icache = srv.cachex(CACHE_NAME);
IgniteCache cache = srv.cache(CACHE_NAME);
assertNotNull(icache);
for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
GridCursor<? extends CacheDataRow> cur = store.cursor();
while (cur.next()) {
CacheDataRow row = cur.get();
int key = row.key().value(icache.context().cacheObjectContext(), false);
if (!afterRebuild || key <= AMOUNT / 2)
assertEquals(key, cache.get(key));
else
assertEquals(-1, cache.get(key));
}
}
}
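The same cursor walk can be packaged as a reusable check. A minimal sketch, assuming a started IgniteEx node and an existing cache; the helper name countRows is introduced for illustration, while cachex(...), cacheDataStores(), cursor() and CacheDataRow are used exactly as in the test above.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.util.lang.GridCursor;

/** Hypothetical helper: counts all rows stored for a cache by walking its cache data stores. */
private static long countRows(IgniteEx srv, String cacheName) throws IgniteCheckedException {
    IgniteInternalCache<?, ?> icache = srv.cachex(cacheName);

    long cnt = 0;

    // Each partition is backed by its own CacheDataStore; walk them all.
    for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
        GridCursor<? extends CacheDataRow> cur = store.cursor();

        while (cur.next())
            cnt++;
    }

    return cnt;
}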
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridIndexRebuildWithMvccEnabledSelfTest, method checkDataState.
/**
* {@inheritDoc}
*/
@Override
protected void checkDataState(IgniteEx srv, boolean afterRebuild) throws IgniteCheckedException {
IgniteInternalCache icache = srv.cachex(CACHE_NAME);
assertNotNull(icache);
CacheObjectContext coCtx = icache.context().cacheObjectContext();
for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
GridCursor<? extends CacheDataRow> cur = store.cursor();
while (cur.next()) {
CacheDataRow row = cur.get();
int key = row.key().value(coCtx, false);
List<IgniteBiTuple<Object, MvccVersion>> vers = store.mvccFindAllVersions(icache.context(), row.key());
if (!afterRebuild || key <= AMOUNT / 2)
assertEquals(key, vers.size());
else {
// For keys affected by the concurrent put there are two versions:
// -1 (the concurrent put mark) and the newest restored value, since the put cleans up obsolete versions.
assertEquals(2, vers.size());
Object val0 = ((CacheObject) vers.get(0).getKey()).value(coCtx, false);
Object val1 = ((CacheObject) vers.get(1).getKey()).value(coCtx, false);
assertEquals(-1, val0);
assertEquals(key, val1);
}
}
}
}
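The per-key MVCC history used above can also be inspected on its own. A minimal sketch, assuming the cache is MVCC-enabled; the helper name dumpMvccVersionCounts is introduced for illustration, and mvccFindAllVersions(...) is invoked with the same arguments as in the test.

import java.util.List;
import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.CacheObjectContext;
import org.apache.ignite.internal.processors.cache.IgniteCacheOffheapManager;
import org.apache.ignite.internal.processors.cache.IgniteInternalCache;
import org.apache.ignite.internal.processors.cache.mvcc.MvccVersion;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.util.lang.GridCursor;
import org.apache.ignite.lang.IgniteBiTuple;

/** Hypothetical helper: prints how many MVCC versions are kept for every key of a cache. */
private static void dumpMvccVersionCounts(IgniteEx srv, String cacheName) throws IgniteCheckedException {
    IgniteInternalCache<?, ?> icache = srv.cachex(cacheName);

    CacheObjectContext coCtx = icache.context().cacheObjectContext();

    for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
        GridCursor<? extends CacheDataRow> cur = store.cursor();

        while (cur.next()) {
            CacheDataRow row = cur.get();

            // All (value, MVCC version) pairs currently stored for this key.
            List<IgniteBiTuple<Object, MvccVersion>> vers = store.mvccFindAllVersions(icache.context(), row.key());

            System.out.println("key=" + row.key().value(coCtx, false) + ", versions=" + vers.size());
        }
    }
}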
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class IgnitePdsPartitionPreloadTest, method preloadPartition.
/**
* @param execNodeFactory Test node factory.
* @param preloadMode Preload mode.
* @throws Exception If failed.
*/
private void preloadPartition(Supplier<Ignite> execNodeFactory, PreloadMode preloadMode) throws Exception {
Ignite crd = startGridsMultiThreaded(GRIDS_CNT);
Ignite testNode = grid(1);
Object consistentId = testNode.cluster().localNode().consistentId();
assertEquals(PRIMARY_NODE, testNode.cluster().localNode().consistentId());
boolean locCacheMode = testNode.cache(DEFAULT_CACHE_NAME).getConfiguration(CacheConfiguration.class).getCacheMode() == LOCAL;
Integer key = primaryKey(testNode.cache(DEFAULT_CACHE_NAME));
int preloadPart = crd.affinity(DEFAULT_CACHE_NAME).partition(key);
int cnt = 0;
try (IgniteDataStreamer<Integer, Integer> streamer = testNode.dataStreamer(DEFAULT_CACHE_NAME)) {
int k = 0;
while (cnt < ENTRY_CNT) {
if (testNode.affinity(DEFAULT_CACHE_NAME).partition(k) == preloadPart) {
streamer.addData(k, k);
cnt++;
}
k++;
}
}
forceCheckpoint();
stopAllGrids();
startGridsMultiThreaded(GRIDS_CNT);
testNode = G.allGrids().stream().filter(ignite -> PRIMARY_NODE.equals(ignite.cluster().localNode().consistentId())).findFirst().get();
if (!locCacheMode)
assertEquals(testNode, primaryNode(key, DEFAULT_CACHE_NAME));
Ignite execNode = execNodeFactory.get();
switch(preloadMode) {
case SYNC:
execNode.cache(DEFAULT_CACHE_NAME).preloadPartition(preloadPart);
if (locCacheMode) {
testNode = G.allGrids().stream().filter(ignite -> ignite.cluster().localNode().consistentId().equals(consistentId)).findFirst().get();
}
break;
case ASYNC:
execNode.cache(DEFAULT_CACHE_NAME).preloadPartitionAsync(preloadPart).get();
if (locCacheMode) {
testNode = G.allGrids().stream().filter(ignite -> ignite.cluster().localNode().consistentId().equals(consistentId)).findFirst().get();
}
break;
case LOCAL:
assertTrue(execNode.cache(DEFAULT_CACHE_NAME).localPreloadPartition(preloadPart));
// For local preloading testNode == execNode
testNode = execNode;
break;
}
long c0 = testNode.dataRegionMetrics(DEFAULT_REGION).getPagesRead();
// After partition preloading no pages should be read from store.
GridIterator<CacheDataRow> cursor = ((IgniteEx) testNode).cachex(DEFAULT_CACHE_NAME).context().offheap().cachePartitionIterator(CU.UNDEFINED_CACHE_ID, preloadPart, null, false);
int realSize = 0;
while (cursor.hasNext()) {
realSize++;
cursor.next();
}
assertEquals("Partition has missed some entries", ENTRY_CNT, realSize);
assertEquals("Read pages count must be same", c0, testNode.dataRegionMetrics(DEFAULT_REGION).getPagesRead());
}
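The final size check can be expressed as a small helper too. A minimal sketch, assuming the node and partition id are known; the helper name partitionSize is introduced for illustration, and cachePartitionIterator(...) is called with the same arguments as in the test above.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.util.lang.GridIterator;
import org.apache.ignite.internal.util.typedef.internal.CU;

/** Hypothetical helper: counts the rows of a single partition using the offheap partition iterator. */
private static int partitionSize(IgniteEx node, String cacheName, int part) throws IgniteCheckedException {
    // Same iterator the test uses to verify that preloading brought the whole partition into memory.
    GridIterator<CacheDataRow> it = node.cachex(cacheName).context().offheap().cachePartitionIterator(CU.UNDEFINED_CACHE_ID, part, null, false);

    int size = 0;

    while (it.hasNext()) {
        it.next();

        size++;
    }

    return size;
}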
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class CacheMvccSqlTxQueriesAbstractTest, method checkAllVersionsHints.
/**
* Checks that every MVCC row version stored for the cache carries a non-zero transaction state hint.
*
* @param cache Cache.
* @throws IgniteCheckedException If failed.
*/
private void checkAllVersionsHints(IgniteCache cache) throws IgniteCheckedException {
IgniteCacheProxy cache0 = (IgniteCacheProxy) cache;
GridCacheContext cctx = cache0.context();
assert cctx.mvccEnabled();
for (Object e : cache) {
IgniteBiTuple entry = (IgniteBiTuple) e;
KeyCacheObject key = cctx.toCacheKeyObject(entry.getKey());
GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, key, CacheDataRowAdapter.RowData.LINK_WITH_HEADER);
while (cur.next()) {
CacheDataRow row = cur.get();
assertTrue(row.mvccTxState() != 0);
}
}
}
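The same assertion can be factored out per key. A minimal sketch, assuming an MVCC-enabled cache context; the helper name mvccVersionCount is introduced for illustration, while toCacheKeyObject(...), mvccAllVersionsCursor(...) and mvccTxState() are used exactly as in the method above.

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.internal.processors.cache.GridCacheContext;
import org.apache.ignite.internal.processors.cache.KeyCacheObject;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRow;
import org.apache.ignite.internal.processors.cache.persistence.CacheDataRowAdapter;
import org.apache.ignite.internal.util.lang.GridCursor;

/** Hypothetical helper: returns the number of MVCC row versions stored for a key, asserting each carries a TX state hint. */
private static int mvccVersionCount(GridCacheContext cctx, Object key) throws IgniteCheckedException {
    assert cctx.mvccEnabled();

    KeyCacheObject cacheKey = cctx.toCacheKeyObject(key);

    // Cursor over every MVCC version of the row, using the same RowData mode as the check above.
    GridCursor<CacheDataRow> cur = cctx.offheap().mvccAllVersionsCursor(cctx, cacheKey, CacheDataRowAdapter.RowData.LINK_WITH_HEADER);

    int cnt = 0;

    while (cur.next()) {
        CacheDataRow row = cur.get();

        // Every version written by a committed transaction should have a non-zero TX state hint.
        assert row.mvccTxState() != 0;

        cnt++;
    }

    return cnt;
}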