Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class GridDhtColocatedCache, method loadAsync.
/**
 * @param keys Keys to load.
 * @param readThrough Read through flag.
 * @param forcePrimary Force get from primary node flag.
 * @param topVer Topology version.
 * @param taskName Task name.
 * @param deserializeBinary Deserialize binary flag.
 * @param recovery Recovery mode flag.
 * @param expiryPlc Expiry policy.
 * @param skipVals Skip values flag.
 * @param needVer If {@code true} returns values as tuples containing value and version.
 * @param keepCacheObj Keep cache objects flag.
 * @param txLbl Transaction label.
 * @param mvccSnapshot Mvcc snapshot.
 * @return Load future.
 */
public final IgniteInternalFuture<Map<K, V>> loadAsync(
    @Nullable Collection<KeyCacheObject> keys,
    boolean readThrough,
    boolean forcePrimary,
    AffinityTopologyVersion topVer,
    String taskName,
    boolean deserializeBinary,
    boolean recovery,
    @Nullable IgniteCacheExpiryPolicy expiryPlc,
    boolean skipVals,
    boolean needVer,
    boolean keepCacheObj,
    @Nullable String txLbl,
    @Nullable MvccSnapshot mvccSnapshot) {
    assert (mvccSnapshot == null) == !ctx.mvccEnabled();

    if (keys == null || keys.isEmpty())
        return new GridFinishedFuture<>(Collections.<K, V>emptyMap());

    if (expiryPlc == null)
        expiryPlc = expiryPolicy(null);

    // Optimization: try to resolve the value locally and avoid 'get future' creation.
    if (!forcePrimary && ctx.config().isReadFromBackup() && ctx.affinityNode() && ctx.topology().lostPartitions().isEmpty()) {
        ctx.shared().database().checkpointReadLock();

        try {
            Map<K, V> locVals = null;

            boolean success = true;
            boolean readNoEntry = ctx.readNoEntry(expiryPlc, false);
            boolean evt = !skipVals;

            for (KeyCacheObject key : keys) {
                if (readNoEntry) {
                    CacheDataRow row = mvccSnapshot != null
                        ? ctx.offheap().mvccRead(ctx, key, mvccSnapshot)
                        : ctx.offheap().read(ctx, key);

                    if (row != null) {
                        long expireTime = row.expireTime();

                        if (expireTime == 0 || expireTime > U.currentTimeMillis()) {
                            if (locVals == null)
                                locVals = U.newHashMap(keys.size());

                            ctx.addResult(locVals, key, row.value(), skipVals, keepCacheObj, deserializeBinary, true,
                                null, row.version(), 0, 0, needVer,
                                U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())));

                            if (evt)
                                ctx.events().readEvent(key, null, txLbl, row.value(), taskName, !deserializeBinary);
                        }
                        else
                            success = false;
                    }
                    else
                        success = false;
                }
                else {
                    GridCacheEntryEx entry = null;

                    while (true) {
                        try {
                            entry = entryEx(key);

                            // If our DHT cache does have the value, then we peek it.
                            if (entry != null) {
                                boolean isNew = entry.isNewLocked();

                                EntryGetResult getRes = null;
                                CacheObject v = null;
                                GridCacheVersion ver = null;

                                if (needVer) {
                                    getRes = entry.innerGetVersioned(null, null,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        null, taskName, expiryPlc, !deserializeBinary, null);

                                    if (getRes != null) {
                                        v = getRes.value();
                                        ver = getRes.version();
                                    }
                                }
                                else {
                                    v = entry.innerGet(null, null,
                                        /*read-through*/false,
                                        /*update-metrics*/false,
                                        /*event*/evt,
                                        null, taskName, expiryPlc, !deserializeBinary);
                                }

                                // Entry was not in memory or in swap, so we remove it from cache.
                                if (v == null) {
                                    GridCacheVersion obsoleteVer = nextVersion();

                                    if (isNew && entry.markObsoleteIfEmpty(obsoleteVer))
                                        removeEntry(entry);

                                    success = false;
                                }
                                else {
                                    if (locVals == null)
                                        locVals = U.newHashMap(keys.size());

                                    ctx.addResult(locVals, key, v, skipVals, keepCacheObj, deserializeBinary, true,
                                        getRes, ver, 0, 0, needVer,
                                        U.deploymentClassLoader(ctx.kernalContext(), U.contextDeploymentClassLoaderId(ctx.kernalContext())));
                                }
                            }
                            else
                                success = false;

                            break; // While.
                        }
                        catch (GridCacheEntryRemovedException ignored) {
                            // No-op, retry.
                        }
                        catch (GridDhtInvalidPartitionException ignored) {
                            success = false;

                            break; // While.
                        }
                        finally {
                            if (entry != null)
                                entry.touch();
                        }
                    }
                }

                if (!success)
                    break;
                else if (!skipVals && ctx.statisticsEnabled())
                    ctx.cache().metrics0().onRead(true);
            }

            if (success) {
                sendTtlUpdateRequest(expiryPlc);

                return new GridFinishedFuture<>(locVals);
            }
        }
        catch (IgniteCheckedException e) {
            return new GridFinishedFuture<>(e);
        }
        finally {
            ctx.shared().database().checkpointReadUnlock();
        }
    }

    if (expiryPlc != null)
        expiryPlc.reset();

    // Either reload, or not all values are available locally.
    GridPartitionedGetFuture<K, V> fut = new GridPartitionedGetFuture<>(ctx, keys, readThrough, forcePrimary,
        taskName, deserializeBinary, recovery, expiryPlc, skipVals, needVer, keepCacheObj, txLbl, mvccSnapshot, null);

    fut.init(topVer);

    return fut;
}
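For orientation, here is a sketch of how a caller might drive loadAsync. The colocated cache reference, the key collection, and the listener body are hypothetical (the real call sites are the getAll-style entry points of the colocated cache); the sketch only illustrates the parameter order and the flags that enable the local fast path above.

// Hypothetical caller: 'colocated', 'keys' and 'ctx' are assumed to be in scope;
// this is an illustrative sketch, not an actual Ignite call site.
IgniteInternalFuture<Map<K, V>> loadFut = colocated.loadAsync(
    keys,
    true,                                     // readThrough
    false,                                    // forcePrimary: false keeps the local-read fast path available
    ctx.affinity().affinityTopologyVersion(), // topVer
    "example-task",                           // taskName
    true,                                     // deserializeBinary
    false,                                    // recovery
    null,                                     // expiryPlc: null falls back to expiryPolicy(null)
    false,                                    // skipVals
    false,                                    // needVer
    false,                                    // keepCacheObj
    null,                                     // txLbl
    null);                                    // mvccSnapshot: must be null unless MVCC is enabled

loadFut.listen(f -> {
    try {
        Map<K, V> vals = f.get();
        // Consume the loaded values.
    }
    catch (IgniteCheckedException e) {
        // Either the optimistic local read or the remote get failed.
    }
});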
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class IgniteRebalanceIteratorImpl, method next.
/** {@inheritDoc} */
@Override public synchronized CacheDataRow next() {
    try {
        if (cached != null) {
            CacheDataRow res = cached;

            cached = null;

            return res;
        }

        return nextX();
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
}
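next() drains a single-slot cache before delegating to nextX(). A plausible peek() counterpart that fills that slot is sketched below; it is an assumption about the pattern, not a verbatim copy of IgniteRebalanceIteratorImpl.

// Sketch of a peek() that populates the 'cached' slot consumed by next().
// Assumed shape; the actual method in IgniteRebalanceIteratorImpl may differ.
public synchronized CacheDataRow peek() {
    try {
        if (cached == null && hasNextX())
            cached = nextX();

        return cached;
    }
    catch (IgniteCheckedException e) {
        throw new IgniteException(e);
    }
}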
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class IgniteRebalanceIteratorImpl, method nextX.
/** {@inheritDoc} */
@Override public synchronized CacheDataRow nextX() throws IgniteCheckedException {
    if (historicalIterator != null && historicalIterator.hasNextX())
        return historicalIterator.nextX();

    if (current == null || !current.getValue().hasNextX())
        throw new NoSuchElementException();

    CacheDataRow result = current.getValue().nextX();

    assert result.partition() == current.getKey();

    advance();

    return result;
}
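nextX() serves historical rows first, then walks per-partition iterators, calling advance() when the current one is drained. Below is a sketch of what advance() plausibly looks like, assuming the per-partition iterators live in a sorted map (here called fullIterators, a hypothetical name); the real method may differ in detail.

// Assumed: fullIterators is a NavigableMap<Integer, GridCloseableIterator<CacheDataRow>>
// mapping partition id to its iterator. Moves 'current' to the next non-empty partition.
private synchronized void advance() throws IgniteCheckedException {
    while (current != null && !current.getValue().hasNextX())
        current = fullIterators.higherEntry(current.getKey());
}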
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class IgniteSnapshotManagerSelfTest, method testSnapshotIteratorRandomizedLoader.
/**
 * @throws Exception If failed.
 */
@Test
public void testSnapshotIteratorRandomizedLoader() throws Exception {
    Random rnd = new Random();

    int maxKey = 15_000;
    int maxValSize = 32_768;
    int loadingTimeMs = 30_000;

    CacheConfiguration<Integer, Value> ccfg = txCacheConfig(new CacheConfiguration<Integer, Value>("tx1"))
        .setAffinity(new RendezvousAffinityFunction(false, 1));

    IgniteEx ignite = startGridsWithCache(1, CACHE_KEYS_RANGE, k -> new Value(new byte[1024]), ccfg);

    IgniteCache<Integer, Value> cache = ignite.cache(ccfg.getName());

    long startTime = U.currentTimeMillis();

    IgniteInternalFuture<?> fut = GridTestUtils.runMultiThreadedAsync(() -> {
        while (!Thread.currentThread().isInterrupted() && startTime + loadingTimeMs > U.currentTimeMillis()) {
            if (rnd.nextBoolean())
                cache.put(rnd.nextInt(maxKey), new Value(new byte[rnd.nextInt(maxValSize)]));
            else
                cache.remove(rnd.nextInt(maxKey));
        }
    }, 10, "change-loader-");

    fut.get();

    ignite.snapshot().createSnapshot(SNAPSHOT_NAME).get();

    Map<Integer, Value> iterated = new HashMap<>();

    try (GridCloseableIterator<CacheDataRow> iter = snp(ignite).partitionRowIterator(ignite.context(),
        SNAPSHOT_NAME,
        ignite.context().pdsFolderResolver().resolveFolders().folderName(),
        ccfg.getName(),
        0)
    ) {
        CacheObjectContext coctx = ignite.cachex(ccfg.getName()).context().cacheObjectContext();

        while (iter.hasNext()) {
            CacheDataRow row = iter.next();

            iterated.put(row.key().value(coctx, true), row.value().value(coctx, true));
        }
    }

    stopAllGrids();

    IgniteEx snpIgnite = startGridsFromSnapshot(1, SNAPSHOT_NAME);

    IgniteCache<Integer, Value> snpCache = snpIgnite.cache(ccfg.getName());

    assertEquals(snpCache.size(CachePeekMode.PRIMARY), iterated.size());

    snpCache.forEach(e -> {
        Value val = iterated.remove(e.getKey());

        assertNotNull(val);
        assertEquals(val.arr().length, e.getValue().arr().length);
    });

    assertTrue(iterated.isEmpty());
}
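The test relies on a Value fixture exposing its wrapped byte array through arr(). A minimal sketch consistent with its use here; the field and accessor names are inferred from the test, and the real fixture may carry more.

// Minimal fixture sketch; requires java.io.Serializable.
private static class Value implements Serializable {
    /** Payload. */
    private final byte[] arr;

    Value(byte[] arr) {
        this.arr = arr;
    }

    public byte[] arr() {
        return arr;
    }
}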
Use of org.apache.ignite.internal.processors.cache.persistence.CacheDataRow in project ignite by apache.
The class IgniteClusterSnapshotCheckTest, method testClusterSnapshotCheckFailsOnPartitionDataDiffers.
/**
 * @throws Exception If failed.
 */
@Test
public void testClusterSnapshotCheckFailsOnPartitionDataDiffers() throws Exception {
    CacheConfiguration<Integer, Value> ccfg = txCacheConfig(new CacheConfiguration<Integer, Value>(DEFAULT_CACHE_NAME))
        .setAffinity(new RendezvousAffinityFunction(false, 1));

    IgniteEx ignite = startGridsWithoutCache(2);

    ignite.getOrCreateCache(ccfg).put(1, new Value(new byte[2000]));

    forceCheckpoint(ignite);

    GridCacheSharedContext<?, ?> cctx = ignite.context().cache().context();
    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)cctx.database();
    BinaryContext binCtx = ((CacheObjectBinaryProcessorImpl)ignite.context().cacheObjects()).binaryContext();

    GridCacheAdapter<?, ?> cache = ignite.context().cache().internalCache(dfltCacheCfg.getName());
    long partCtr = cache.context().topology().localPartition(PART_ID, NONE, false).dataStore().updateCounter();

    AtomicBoolean done = new AtomicBoolean();

    db.addCheckpointListener(new CheckpointListener() {
        @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
            // Change the cache value on only one of the cluster nodes to get a hash conflict when the check command ends.
            if (!done.compareAndSet(false, true))
                return;

            GridIterator<CacheDataRow> it = cache.context().offheap().partitionIterator(PART_ID);

            assertTrue(it.hasNext());

            CacheDataRow row0 = it.nextX();

            AffinityTopologyVersion topVer = cctx.exchange().readyAffinityVersion();
            GridCacheEntryEx cached = cache.entryEx(row0.key(), topVer);

            byte[] bytes = new byte[2000];
            new Random().nextBytes(bytes);

            try {
                BinaryObjectImpl newVal = new BinaryObjectImpl(binCtx, binCtx.marshaller().marshal(new Value(bytes)), 0);

                boolean success = cached.initialValue(newVal,
                    new GridCacheVersion(row0.version().topologyVersion(),
                        row0.version().nodeOrder(),
                        row0.version().order() + 1),
                    null, null, TxState.NA, TxState.NA,
                    TTL_ETERNAL, row0.expireTime(), true, topVer,
                    DR_NONE, false, false, null);

                assertTrue(success);

                long newPartCtr = cache.context().topology().localPartition(PART_ID, NONE, false).dataStore().updateCounter();

                assertEquals(newPartCtr, partCtr);
            }
            catch (Exception e) {
                throw new IgniteCheckedException(e);
            }
        }

        @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
        }

        @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
        }
    });

    db.waitForCheckpoint("test-checkpoint");

    ignite.snapshot().createSnapshot(SNAPSHOT_NAME).get();

    Path part0 = U.searchFileRecursively(snp(ignite).snapshotLocalDir(SNAPSHOT_NAME).toPath(), getPartitionFileName(PART_ID));

    assertNotNull(part0);
    assertTrue(part0.toString(), part0.toFile().exists());

    IdleVerifyResultV2 res = snp(ignite).checkSnapshot(SNAPSHOT_NAME).get();

    StringBuilder b = new StringBuilder();

    res.print(b::append, true);

    assertTrue(F.isEmpty(res.exceptions()));
    assertContains(log, b.toString(),
        "The check procedure has failed, conflict partitions has been found: [counterConflicts=0, hashConflicts=1]");
}
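The expected message follows from how the checkpoint listener mutates the partition: initialValue(...) rewrites the row with a strictly newer version but does not advance the partition update counter, so the check sees equal counters (counterConflicts=0) but differing content (hashConflicts=1). The decisive detail is the version bump, isolated here for emphasis:

// A version strictly newer than the stored row's, so initialValue(...) overwrites the
// value without touching the update counter (which is what the assertions above verify).
// Argument order mirrors the constructor call in the test: topVer, nodeOrder, order.
GridCacheVersion bumped = new GridCacheVersion(
    row0.version().topologyVersion(), // same topology version
    row0.version().nodeOrder(),       // same originating node
    row0.version().order() + 1);      // higher order wins the version comparison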