use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
the class RebalanceStatisticsTest method testRebalanceStatistics.
/**
* Test statistics of a rebalance.
*
* Steps:
* 1) Create and fill a cluster;
* 2) Start a new node while listening for log messages and supply messages;
* 3) Check that the number of supply messages equals the number of log messages received + 1;
* 4) Find the corresponding log message for each supply message;
* 5) Find the log message emitted after all groups have finished and check its correctness.
*
* @throws Exception if any error occurs.
*/
@Test
public void testRebalanceStatistics() throws Exception {
createCluster(3);
ListeningTestLogger listeningTestLog = new ListeningTestLogger(log);
IgniteConfiguration cfg = getConfiguration(getTestIgniteInstanceName(3)).setGridLogger(listeningTestLog);
// Collect log messages with rebalance statistics.
Collection<String> logMsgs = new ConcurrentLinkedQueue<>();
listeningTestLog.registerListener(new CallbackExecutorLogListener("Completed( \\(final\\))? rebalanc(ing|e chain).*", logMsgs::add));
Map<Ignite, Collection<T2<ClusterNode, Message>>> recordMsgs = new ConcurrentHashMap<>();
G.allGrids().forEach(n -> TestRecordingCommunicationSpi.spi(n).record((node, msg) -> {
if (GridDhtPartitionSupplyMessage.class.isInstance(msg))
recordMsgs.computeIfAbsent(n, n1 -> new ConcurrentLinkedQueue<>()).add(new T2<>(node, msg));
return false;
}));
IgniteEx node = startGrid(cfg);
awaitPartitionMapExchange();
// Collect supply messages only for the new node.
Map<Ignite, List<GridDhtPartitionSupplyMessage>> supplyMsgs = G.allGrids().stream()
    .filter(n -> !n.equals(node))
    .collect(toMap(identity(), n -> recordMsgs.get(n).stream()
        .filter(t2 -> t2.get1().id().equals(node.localNode().id()))
        .map(IgniteBiTuple::get2)
        .map(GridDhtPartitionSupplyMessage.class::cast)
        .collect(toList())));
// +1 because there is one extra log message about the end of rebalancing for all groups.
assertEquals(supplyMsgs.values().stream().mapToInt(List::size).sum() + 1, logMsgs.size());
IgniteClosure2X<GridCacheEntryInfo, CacheObjectContext, Long> getSize = new IgniteClosure2X<GridCacheEntryInfo, CacheObjectContext, Long>() {
/** {@inheritDoc} */
@Override
public Long applyx(GridCacheEntryInfo info, CacheObjectContext ctx) throws IgniteCheckedException {
return (long) info.marshalledSize(ctx);
}
};
for (Map.Entry<Ignite, List<GridDhtPartitionSupplyMessage>> supplyMsg : supplyMsgs.entrySet()) {
List<String> supplierMsgs = logMsgs.stream()
    .filter(s -> s.contains("supplier=" + supplyMsg.getKey().cluster().localNode().id()))
    .collect(toList());
List<GridDhtPartitionSupplyMessage> msgs = supplyMsg.getValue();
assertEquals(msgs.size(), supplierMsgs.size());
for (GridDhtPartitionSupplyMessage msg : msgs) {
Map<Integer, CacheEntryInfoCollection> infos = U.field(msg, "infos");
CacheGroupContext grpCtx = node.context().cache().cacheGroup(msg.groupId());
long bytes = 0;
for (CacheEntryInfoCollection c : infos.values()) {
for (GridCacheEntryInfo i : c.infos()) bytes += getSize.apply(i, grpCtx.cacheObjectContext());
}
String[] checVals = {
    "grp=" + grpCtx.cacheOrGroupName(),
    "partitions=" + infos.size(),
    "entries=" + infos.values().stream().mapToInt(i -> i.infos().size()).sum(),
    "topVer=" + msg.topologyVersion(),
    "rebalanceId=" + U.field(msg, "rebalanceId"),
    "bytesRcvd=" + U.humanReadableByteCount(bytes),
    "fullPartitions=" + infos.size(),
    "fullEntries=" + infos.values().stream().mapToInt(i -> i.infos().size()).sum(),
    "fullBytesRcvd=" + U.humanReadableByteCount(bytes),
    "histPartitions=0",
    "histEntries=0",
    "histBytesRcvd=0"
};
assertTrue("msgs=" + supplierMsgs.toString() + ", checVals=" + asList(checVals).toString(),
    supplierMsgs.stream().anyMatch(s -> Stream.of(checVals).allMatch(s::contains)));
}
}
String rebChainMsg = logMsgs.stream().filter(s -> s.startsWith("Completed rebalance chain")).findAny().get();
long rebId = -1;
int parts = 0;
int entries = 0;
long bytes = 0;
for (List<GridDhtPartitionSupplyMessage> msgs : supplyMsgs.values()) {
for (GridDhtPartitionSupplyMessage msg : msgs) {
Map<Integer, CacheEntryInfoCollection> infos = U.field(msg, "infos");
rebId = U.field(msg, "rebalanceId");
parts += infos.size();
entries += infos.values().stream().mapToInt(i -> i.infos().size()).sum();
CacheObjectContext cacheObjCtx = node.context().cache().cacheGroup(msg.groupId()).cacheObjectContext();
for (CacheEntryInfoCollection c : infos.values()) {
for (GridCacheEntryInfo i : c.infos()) bytes += getSize.apply(i, cacheObjCtx);
}
}
}
String[] checVals = { "partitions=" + parts, "entries=" + entries, "rebalanceId=" + rebId, "bytesRcvd=" + U.humanReadableByteCount(bytes) };
assertTrue(rebChainMsg, Stream.of(checVals).allMatch(rebChainMsg::contains));
}
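The CacheObjectContext usage in this test boils down to marshalling-size accounting per supply message. A minimal sketch of that idiom follows (the helper name and its placement in a test class are assumptions; every call it makes appears in the snippet above):
/** Minimal sketch: sum the marshalled size of all entries carried by one supply message. */
private static long marshalledBytes(IgniteEx node, GridDhtPartitionSupplyMessage msg) throws IgniteCheckedException {
    // The group's CacheObjectContext defines how entries of that cache group are marshalled.
    CacheObjectContext coctx = node.context().cache().cacheGroup(msg.groupId()).cacheObjectContext();
    Map<Integer, CacheEntryInfoCollection> infos = U.field(msg, "infos");
    long bytes = 0;
    for (CacheEntryInfoCollection col : infos.values()) {
        for (GridCacheEntryInfo info : col.infos())
            bytes += info.marshalledSize(coctx);
    }
    return bytes;
}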
use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
the class GridIndexRebuildWithMvccEnabledSelfTest method checkDataState.
/** {@inheritDoc} */
@Override
protected void checkDataState(IgniteEx srv, boolean afterRebuild) throws IgniteCheckedException {
IgniteInternalCache icache = srv.cachex(CACHE_NAME);
assertNotNull(icache);
CacheObjectContext coCtx = icache.context().cacheObjectContext();
for (IgniteCacheOffheapManager.CacheDataStore store : icache.context().offheap().cacheDataStores()) {
GridCursor<? extends CacheDataRow> cur = store.cursor();
while (cur.next()) {
CacheDataRow row = cur.get();
int key = row.key().value(coCtx, false);
List<IgniteBiTuple<Object, MvccVersion>> vers = store.mvccFindAllVersions(icache.context(), row.key());
if (!afterRebuild || key <= AMOUNT / 2)
assertEquals(key, vers.size());
else {
// For keys affected by the concurrent put there are two versions:
// -1 (the concurrent put mark) and the newest restored value, since the put cleans up obsolete versions.
assertEquals(2, vers.size());
Object val0 = ((CacheObject) vers.get(0).getKey()).value(coCtx, false);
Object val1 = ((CacheObject) vers.get(1).getKey()).value(coCtx, false);
assertEquals(-1, val0);
assertEquals(key, val1);
}
}
}
}
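The CacheObjectContext calls above reduce to a single unwrap idiom; a minimal sketch (variable names are illustrative, and the boolean flag controls whether the returned value is a copy):
CacheObjectContext coCtx = icache.context().cacheObjectContext();
// User-level key stored in the data row.
Integer key = row.key().value(coCtx, false);
// User-level value written by each MVCC version of that key.
for (IgniteBiTuple<Object, MvccVersion> ver : store.mvccFindAllVersions(icache.context(), row.key())) {
    Object verVal = ((CacheObject) ver.getKey()).value(coCtx, false);
    // E.g. -1 for the concurrent put mark checked in the test above.
}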
use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest method checkDataWalEntries.
/**
* @throws Exception if failed.
*/
private void checkDataWalEntries(boolean mvcc) throws Exception {
IgniteEx ig = startGrid(0);
ig.cluster().active(true);
GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
IgniteWriteAheadLogManager wal = sharedCtx.wal();
assertTrue(wal.isAlwaysWriteFullPages());
db.enableCheckpoints(false).get();
final int cnt = 10;
List<DataEntry> entries = new ArrayList<>(cnt);
for (int i = 0; i < cnt; i++) {
GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;
KeyCacheObject key = cctx.toCacheKeyObject(i);
CacheObject val = null;
if (op != GridCacheOperation.DELETE)
val = cctx.toCacheObject("value-" + i);
entries.add(mvcc
    ? new MvccDataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
        cctx.affinity().partition(i), i, new MvccVersionImpl(1000L, 10L, i + 1))
    : new DataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
        cctx.affinity().partition(i), i, DataEntry.EMPTY_FLAGS));
}
UUID cpId = UUID.randomUUID();
WALPointer start = wal.log(new CheckpointRecord(cpId, null));
wal.flush(start, false);
for (DataEntry entry : entries) wal.log(mvcc ? new MvccDataRecord((MvccDataEntry) entry) : new DataRecord(entry));
// Data will not be written to the page store.
stopAllGrids();
ig = startGrid(0);
ig.cluster().active(true);
sharedCtx = ig.context().cache().context();
cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();
db = (GridCacheDatabaseSharedManager) sharedCtx.database();
wal = sharedCtx.wal();
db.enableCheckpoints(false).get();
try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();
assert cpRecordTup.get2() instanceof CheckpointRecord;
assertEquals(start, cpRecordTup.get1());
CheckpointRecord cpRec = (CheckpointRecord) cpRecordTup.get2();
assertEquals(cpId, cpRec.checkpointId());
assertNull(cpRec.checkpointMark());
assertFalse(cpRec.end());
int idx = 0;
CacheObjectContext coctx = cctx.cacheObjectContext();
while (idx < entries.size()) {
IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();
if (!mvcc)
assert dataRecTup.get2() instanceof DataRecord;
else
assert dataRecTup.get2() instanceof MvccDataRecord;
DataRecord dataRec = (DataRecord) dataRecTup.get2();
DataEntry entry = entries.get(idx);
assertEquals(1, dataRec.entryCount());
DataEntry readEntry = dataRec.get(0);
assertEquals(entry.cacheId(), readEntry.cacheId());
assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
assertEquals(entry.op(), readEntry.op());
if (entry.op() == GridCacheOperation.UPDATE)
assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
else
assertNull(entry.value());
assertEquals(entry.writeVersion(), readEntry.writeVersion());
assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
assertEquals(entry.partitionCounter(), readEntry.partitionCounter());
if (mvcc) {
assert entry instanceof MvccDataEntry;
assert readEntry instanceof MvccDataEntry;
assertEquals(((MvccDataEntry) entry).mvccVer(), ((MvccDataEntry) readEntry).mvccVer());
}
idx++;
}
}
}
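The CacheObjectContext round trip this test relies on can be isolated into a few lines; a minimal sketch under the same setup (values are illustrative):
CacheObjectContext coctx = cctx.cacheObjectContext();
// Wrap user objects into cache objects via the cache context...
KeyCacheObject key = cctx.toCacheKeyObject(1);
CacheObject val = cctx.toCacheObject("value-1");
// ...and unwrap them again through the CacheObjectContext (true mirrors the copy flag used by the assertions above).
Integer rawKey = key.value(coctx, true);
String rawVal = val.value(coctx, true);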
use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
the class QueryIndexRowHandler method value.
/** */
private Object value(CacheDataRow row) {
CacheObject val = row.value();
Object o = getBinaryObject(val);
if (o != null)
return o;
CacheObjectContext coctx = cacheDesc.context().cacheObjectContext();
return row.value().value(coctx, false);
}
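For context, a short annotated restatement of the lookup above (the assumption that getBinaryObject returns null when no binary form is available is inferred from its use here):
CacheObject val = row.value();
Object o = getBinaryObject(val);
if (o != null)
    return o; // index the binary form directly
// Fall back to deserializing via the cache's CacheObjectContext (false = no copy).
CacheObjectContext coctx = cacheDesc.context().cacheObjectContext();
return val.value(coctx, false);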
use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
the class IgniteSnapshotManagerSelfTest method testSnapshotIterator.
/**
* @throws Exception If failed.
*/
@Test
public void testSnapshotIterator() throws Exception {
int keys = 127;
IgniteEx ignite = startGridsWithCache(2, dfltCacheCfg.setAffinity(new RendezvousAffinityFunction(false, 1)), keys);
ignite.snapshot().createSnapshot(SNAPSHOT_NAME).get();
int rows = 0;
try (GridCloseableIterator<CacheDataRow> iter = snp(ignite).partitionRowIterator(ignite.context(),
    SNAPSHOT_NAME,
    ignite.context().pdsFolderResolver().resolveFolders().folderName(),
    dfltCacheCfg.getName(),
    0)) {
CacheObjectContext coctx = ignite.cachex(dfltCacheCfg.getName()).context().cacheObjectContext();
while (iter.hasNext()) {
CacheDataRow row = iter.next();
// Invariant for the cache: the cache key always equals the cache value.
assertEquals("Invalid key/value pair [key=" + row.key() + ", val=" + row.value() + ']',
    row.key().value(coctx, false, U.resolveClassLoader(ignite.configuration())),
    (Integer) row.value().value(coctx, false));
rows++;
}
}
assertEquals("Invalid number of rows: " + rows, keys, rows);
}
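The assertion above exercises both value() overloads with a CacheObjectContext; a minimal sketch isolating them (variable names are illustrative):
CacheObjectContext coctx = ignite.cachex(dfltCacheCfg.getName()).context().cacheObjectContext();
// Key unwrap with an explicit class loader resolved from the node configuration.
Integer key = row.key().value(coctx, false, U.resolveClassLoader(ignite.configuration()));
// Value unwrap with the plain two-argument overload (false = no copy).
Integer val = row.value().value(coctx, false);
// The test's invariant: key always equals value.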