Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap in project ignite by apache.
The class WalRecoveryTxLogicalRecordsTest, method testHistoricalRebalanceIterator.
/**
* @throws Exception if failed.
*/
@Test
public void testHistoricalRebalanceIterator() throws Exception {
    System.setProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD, "0");

    extraCcfg = new CacheConfiguration(CACHE_NAME + "2");
    extraCcfg.setAffinity(new RendezvousAffinityFunction(false, PARTS));

    Ignite ignite = startGrid();

    try {
        ignite.cluster().active(true);

        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) ((IgniteEx) ignite).context().cache().context().database();

        dbMgr.waitForCheckpoint("test");

        // This number depends on wal history size.
        int entries = 25;

        IgniteCache<Integer, Integer> cache = ignite.cache(CACHE_NAME);
        IgniteCache<Integer, Integer> cache2 = ignite.cache(CACHE_NAME + "2");

        for (int i = 0; i < entries; i++) {
            // Put to partition 0.
            cache.put(i * PARTS, i * PARTS);

            // Put to partition 1.
            cache.put(i * PARTS + 1, i * PARTS + 1);

            // Put to another cache.
            cache2.put(i, i);

            dbMgr.waitForCheckpoint("test");
        }

        for (int i = 0; i < entries; i++) {
            assertEquals((Integer) (i * PARTS), cache.get(i * PARTS));
            assertEquals((Integer) (i * PARTS + 1), cache.get(i * PARTS + 1));
            assertEquals((Integer) (i), cache2.get(i));
        }

        CacheGroupContext grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
        IgniteCacheOffheapManager offh = grp.offheap();
        AffinityTopologyVersion topVer = grp.affinity().lastVersion();

        IgniteDhtDemandedPartitionsMap map;

        for (int i = 0; i < entries; i++) {
            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(0, i, entries, PARTS);

            WALPointer ptr = reserveWalPointerForIterator(grp.shared());

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                assertNotNull(it);
                assertTrue("Not historical for iteration: " + i, it.historical(0));

                for (int j = i; j < entries; j++) {
                    assertTrue("i=" + i + ", j=" + j, it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                assertFalse(it.hasNext());
            } finally {
                releaseWalPointerForIterator(grp.shared(), ptr);
            }

            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(1, i, entries, PARTS);

            ptr = reserveWalPointerForIterator(grp.shared());

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                assertNotNull(it);
                assertTrue("Not historical for iteration: " + i, it.historical(1));

                for (int j = i; j < entries; j++) {
                    assertTrue(it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS + 1, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS + 1, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                assertFalse(it.hasNext());
            } finally {
                releaseWalPointerForIterator(grp.shared(), ptr);
            }
        }

        stopAllGrids();

        // Check that iterator is valid after restart.
        ignite = startGrid();

        ignite.cluster().active(true);

        grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
        offh = grp.offheap();
        topVer = grp.affinity().lastVersion();

        for (int i = 0; i < entries; i++) {
            long start = System.currentTimeMillis();

            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(0, i, entries, PARTS);

            WALPointer ptr = reserveWalPointerForIterator(grp.shared());

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                long end = System.currentTimeMillis();

                info("Time to get iterator: " + (end - start));

                assertTrue("Not historical for iteration: " + i, it.historical(0));
                assertNotNull(it);

                start = System.currentTimeMillis();

                for (int j = i; j < entries; j++) {
                    assertTrue("i=" + i + ", j=" + j, it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                end = System.currentTimeMillis();

                info("Time to iterate: " + (end - start));

                assertFalse(it.hasNext());
            } finally {
                releaseWalPointerForIterator(grp.shared(), ptr);
            }

            map = new IgniteDhtDemandedPartitionsMap();
            map.addHistorical(1, i, entries, PARTS);

            ptr = reserveWalPointerForIterator(grp.shared());

            try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
                assertNotNull(it);
                assertTrue("Not historical for iteration: " + i, it.historical(1));

                for (int j = i; j < entries; j++) {
                    assertTrue(it.hasNextX());

                    CacheDataRow row = it.next();

                    assertEquals(j * PARTS + 1, (int) row.key().value(grp.cacheObjectContext(), false));
                    assertEquals(j * PARTS + 1, (int) row.value().value(grp.cacheObjectContext(), false));
                }

                assertFalse(it.hasNext());
            } finally {
                releaseWalPointerForIterator(grp.shared(), ptr);
            }
        }
    } finally {
        stopAllGrids();

        System.clearProperty(IgniteSystemProperties.IGNITE_PDS_WAL_REBALANCE_THRESHOLD);
    }
}
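For reference, the demand-map construction repeated throughout the test can be distilled into a small helper. This helper is not part of the original test, and the parameter roles are inferred from the call sites above (partition id, starting update counter, ending update counter, total partition count of the cache group).

/** Builds a demand map requesting one partition historically, i.e. from WAL (illustrative helper, not from the test). */
private static IgniteDhtDemandedPartitionsMap historicalDemand(int part, long fromCntr, long toCntr, int partsCnt) {
    IgniteDhtDemandedPartitionsMap map = new IgniteDhtDemandedPartitionsMap();

    // Entries of 'part' with update counters between fromCntr and toCntr are expected to be
    // streamed from the WAL instead of being sent as a full partition preload.
    map.addHistorical(part, fromCntr, toCntr, partsCnt);

    return map;
}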
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap in project ignite by apache.
The class WalRecoveryTxLogicalRecordsTest, method rows.
/**
 * @param ignite Ignite.
 * @param part Partition.
 * @param from From counter.
 * @param to To counter.
 * @return Rows returned by a historical rebalance iterator for the given partition and counter range.
 * @throws IgniteCheckedException If failed.
 */
private List<CacheDataRow> rows(Ignite ignite, int part, long from, long to) throws IgniteCheckedException {
    CacheGroupContext grp = ((IgniteEx) ignite).context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
    IgniteCacheOffheapManager offh = grp.offheap();
    AffinityTopologyVersion topVer = grp.affinity().lastVersion();

    IgniteDhtDemandedPartitionsMap map = new IgniteDhtDemandedPartitionsMap();
    map.addHistorical(part, from, to, PARTS);

    List<CacheDataRow> rows = new ArrayList<>();

    WALPointer ptr = reserveWalPointerForIterator(grp.shared());

    try (IgniteRebalanceIterator it = offh.rebalanceIterator(map, topVer)) {
        assertNotNull(it);

        while (it.hasNextX())
            rows.add(it.next());
    } finally {
        releaseWalPointerForIterator(grp.shared(), ptr);
    }

    return rows;
}
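A hypothetical call site for this helper; the partition number and counter range below are purely illustrative.

// Rows that historical rebalance would supply for partition 0 over the
// update-counter range 10..20 (illustrative values).
List<CacheDataRow> historical = rows(ignite, 0, 10, 20);

assertFalse(historical.isEmpty());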
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap in project ignite by apache.
The class WalRebalanceRestartTest, method stopFirstFoundSupplier.
/**
 * Stops the first supplier found for the current rebalance on the given node.
 *
 * @param ignite Ignite.
 */
private void stopFirstFoundSupplier(IgniteEx ignite) {
    IgniteInternalFuture rebFut = ignite.cachex(DEFAULT_CACHE_NAME).context().preloader().rebalanceFuture();

    assertFalse(rebFut.isDone());

    Map<UUID, IgniteDhtDemandedPartitionsMap> remaining = U.field(rebFut, "remaining");

    assertFalse(remaining.isEmpty());

    UUID supplierId = remaining.keySet().iterator().next();

    info("First supplier: " + supplierId);

    for (Ignite ign : G.allGrids()) {
        if (ign.cluster().localNode().id().equals(supplierId))
            ign.close();
    }
}
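A sketch of how this helper might be driven in a restart scenario; the flow is illustrative rather than copied from the test, and it assumes the usual GridCommonAbstractTest helpers such as grid(0) and awaitPartitionMapExchange().

// While a historical (WAL) rebalance is still in progress on the demander,
// stop its first supplier and let rebalance restart on the remaining nodes.
IgniteEx demander = grid(0);

stopFirstFoundSupplier(demander);

// After the supplier leaves, rebalance is expected to be reassigned and complete.
awaitPartitionMapExchange();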
Use of org.apache.ignite.internal.processors.cache.distributed.dht.preloader.IgniteDhtDemandedPartitionsMap in project ignite by apache.
The class GridCommonAbstractTest, method printPartitionState.
/**
 * Prints partition state for the given cache.
 *
 * @param cacheName Cache name.
 * @param firstParts Number of partitions to print (only the first {@code firstParts} partitions are printed; {@code 0} means all).
 * @param nodes Grid nodes.
 */
protected static void printPartitionState(String cacheName, int firstParts, List<? extends Ignite> nodes) {
    StringBuilder sb = new StringBuilder();

    sb.append("----preload sync futures----\n");

    for (Ignite ig : nodes) {
        IgniteKernal k = ((IgniteKernal) ig);

        GridCacheAdapter<Object, Object> adapter = k.internalCache(cacheName);

        if (adapter == null)
            continue;

        IgniteInternalFuture<?> syncFut = adapter.preloader().syncFuture();

        sb.append("nodeId=").append(k.context().localNodeId()).append(" consistentId=").append(k.localNode().consistentId()).append(" isDone=").append(syncFut.isDone()).append("\n");
    }

    sb.append("----rebalance futures----\n");

    for (Ignite ig : nodes) {
        IgniteKernal k = ((IgniteKernal) ig);

        GridCacheAdapter<Object, Object> adapter = k.internalCache(cacheName);

        if (adapter == null)
            continue;

        IgniteInternalFuture<?> f = adapter.preloader().rebalanceFuture();

        try {
            sb.append("nodeId=").append(k.context().localNodeId()).append(" isDone=").append(f.isDone()).append(" res=").append(f.isDone() ? f.get() : "N/A").append(" topVer=").append((U.hasField(f, "topVer") ? String.valueOf(U.<Object>field(f, "topVer")) : "N/A")).append("\n");

            Map<UUID, IgniteDhtDemandedPartitionsMap> remaining = U.field(f, "remaining");

            sb.append("remaining: ");
            sb.append(remaining.toString());
            sb.append("\n");
        } catch (Throwable e) {
            log.error(e.getMessage());
        }
    }

    sb.append("----partition state----\n");

    for (Ignite g : nodes) {
        IgniteKernal g0 = (IgniteKernal) g;

        sb.append("localNodeId=").append(g0.localNode().id()).append(" grid=").append(g0.name()).append("\n");

        IgniteCacheProxy<?, ?> cache = null;

        try {
            cache = g0.context().cache().jcache(cacheName);
        } catch (IllegalArgumentException e) {
            // Client topology.
            continue;
        }

        GridDhtCacheAdapter<?, ?> dht = dht(cache);
        GridDhtPartitionTopology top = dht.topology();

        int parts = firstParts == 0 ? cache.context().config().getAffinity().partitions() : firstParts;

        for (int p = 0; p < parts; p++) {
            AffinityTopologyVersion readyVer = dht.context().shared().exchange().readyAffinityVersion();

            Collection<UUID> affNodes = F.nodeIds(dht.context().affinity().assignment(readyVer).idealAssignment().get(p));

            GridDhtLocalPartition part = top.localPartition(p, AffinityTopologyVersion.NONE, false);

            sb.append("local part=");

            if (part != null) {
                sb.append(p).append(" counters=").append(part.dataStore().partUpdateCounter()).append(" fullSize=").append(part.fullSize()).append(" state=").append(part.state()).append(" reservations=").append(part.reservations());
            } else
                sb.append(p).append(" is null");

            sb.append(" isAffNode=").append(affNodes.contains(g0.localNode().id())).append("\n");

            for (UUID nodeId : F.nodeIds(g0.context().discovery().allNodes())) {
                if (!nodeId.equals(g0.localNode().id())) {
                    top.readLock();

                    // Peek to remote state directly to distinguish if a partition is EVICTED or yet not initialized.
                    GridDhtPartitionFullMap map = U.field(top, "node2part");

                    try {
                        final GridDhtPartitionMap nodeMap = map.get(nodeId);

                        if (nodeMap == null)
                            // Skip client node.
                            continue;

                        final GridDhtPartitionState rmtState = nodeMap.get(p);

                        if (rmtState != null) {
                            sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" state=").append(rmtState).append(" isAffNode=").append(affNodes.contains(nodeId)).append("\n");
                        } else {
                            sb.append(" nodeId=").append(nodeId).append(" part=").append(p).append(" is null").append("\n");
                        }
                    } finally {
                        top.readUnlock();
                    }
                }
            }
        }

        sb.append("\n");
    }

    log.info("dump partitions state for <" + cacheName + ">:\n" + sb.toString());
}
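Typical invocations, for reference; as the body above shows, passing 0 for firstParts prints every partition of the cache's affinity function. DEFAULT_CACHE_NAME and G.allGrids() are the same identifiers used in the snippets above.

// Dump the state of the first 16 partitions of the default cache on all started nodes.
printPartitionState(DEFAULT_CACHE_NAME, 16, G.allGrids());

// Dump the state of all partitions.
printPartitionState(DEFAULT_CACHE_NAME, 0, G.allGrids());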