Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project.
From the class TxCrossCacheRemoteMultiplePartitionReservationTest, method testRemoteCommitPartitionReservations:
/**
 * Tests that a backup (remote) commit reserves the recreated backup partition the same
 * number of times for every cache participating in the transaction.
 */
@Test
public void testRemoteCommitPartitionReservations() throws Exception {
    try {
        IgniteEx crd = startGrids(2);

        awaitPartitionMapExchange();

        IgniteEx client = startClientGrid("client");

        IgniteCache<Object, Object> cache1 = client.cache(CACHE1);
        IgniteCache<Object, Object> cache2 = client.cache(CACHE2);

        List<Integer> evictingIds = evictingPartitionsAfterJoin(crd, crd.cache(CACHE1), 10);

        int[] backupParts = crd.affinity(CACHE1).backupPartitions(crd.localNode());
        Arrays.sort(backupParts);

        int evictingBackupPartId = -1;

        for (int id : evictingIds) {
            if (Arrays.binarySearch(backupParts, id) >= 0) {
                evictingBackupPartId = id;

                break;
            }
        }

        assertTrue(evictingBackupPartId != -1);

        startGrid(2);
        awaitPartitionMapExchange(true, true, null);

        // Mock partition after re-create.
        final int finalEvictingBackupPartId = evictingBackupPartId;

        Map<Integer, AtomicInteger> reserveCntrs = new ConcurrentHashMap<>();

        GridDhtPartitionTopologyImpl.PartitionFactory factory = new GridDhtPartitionTopologyImpl.PartitionFactory() {
            @Override public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
                return id != finalEvictingBackupPartId ? new GridDhtLocalPartition(ctx, grp, id, recovery) :
                    new GridDhtLocalPartition(ctx, grp, id, recovery) {
                        @Override public boolean reserve() {
                            reserveCntrs.computeIfAbsent(grp.groupId(), integer -> new AtomicInteger()).incrementAndGet();

                            return super.reserve();
                        }
                    };
            }
        };

        Stream.of(CACHE1, CACHE2)
            .map(cache -> (GridDhtPartitionTopologyImpl)crd.cachex(cache).context().topology())
            .forEach(topology -> topology.partitionFactory(factory));

        stopGrid(2);
        awaitPartitionMapExchange(true, true, null);

        reserveCntrs.values().forEach(cntr -> cntr.set(0));

        // Backup commits.
        try (Transaction tx = client.transactions().txStart()) {
            cache1.put(evictingBackupPartId, 0);
            cache2.put(evictingBackupPartId, 0);

            tx.commit();
        }

        assertEquals("Expecting same reservations count for all caches [cntrs=" + reserveCntrs.toString() + ']',
            1, reserveCntrs.values().stream().map(AtomicInteger::get).distinct().count());
    }
    finally {
        stopAllGrids();
    }
}
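The pattern worth extracting from this test is the instrumented partition: GridDhtPartitionTopologyImpl.partitionFactory(...) lets a test substitute its own GridDhtLocalPartition subclass for a chosen partition id. Stripped of the test scaffolding, the idea looks roughly like the sketch below; it reuses only the calls that appear in the test above, and the names node, cacheName and reservations, plus the variable trackedPartId, are illustrative placeholders rather than parts of the Ignite API.

// Sketch only: count reserve() calls on a single partition by installing a custom factory.
AtomicInteger reservations = new AtomicInteger();

GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl)node.cachex(cacheName).context().topology();

top.partitionFactory(new GridDhtPartitionTopologyImpl.PartitionFactory() {
    @Override public GridDhtLocalPartition create(GridCacheSharedContext ctx, CacheGroupContext grp, int id, boolean recovery) {
        if (id != trackedPartId)
            return new GridDhtLocalPartition(ctx, grp, id, recovery);

        // Instrumented partition: record every reservation, then delegate to the regular behavior.
        return new GridDhtLocalPartition(ctx, grp, id, recovery) {
            @Override public boolean reserve() {
                reservations.incrementAndGet();

                return super.reserve();
            }
        };
    }
});

Note that the factory only affects partitions created after it is installed, which is why the test above installs it and then stops grid 2 to force the tracked backup partition to be re-created.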
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project.
From the class VisorFindAndDeleteGarbageInPersistenceClosure, method calcListOfPartitions:
/**
 * @param grpIds Group ids to generate list of partitions for.
 * @return Shuffled list of (cache group, local partition) pairs.
 */
private List<T2<CacheGroupContext, GridDhtLocalPartition>> calcListOfPartitions(Set<Integer> grpIds) {
    List<T2<CacheGroupContext, GridDhtLocalPartition>> partArgs = new ArrayList<>();

    for (Integer grpId : grpIds) {
        CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(grpId);

        List<GridDhtLocalPartition> parts = grpCtx.topology().localPartitions();

        for (GridDhtLocalPartition part : parts)
            partArgs.add(new T2<>(grpCtx, part));
    }

    // Shuffle to decrease contention on the same group.
    Collections.shuffle(partArgs);

    return partArgs;
}
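A brief usage sketch: the pairs produced here match the signature of processPartition(...) shown in the next fragment, so a caller would typically walk the shuffled list and hand each pair to it. The aggregation step below is illustrative only; onGarbageFound is a hypothetical hook, not part of the closure.

for (T2<CacheGroupContext, GridDhtLocalPartition> arg : calcListOfPartitions(grpIds)) {
    // processPartition(...) is the method shown in the next fragment.
    Map<Integer, Map<Integer, Long>> garbage = processPartition(arg.get1(), arg.get2());

    if (!garbage.isEmpty())
        onGarbageFound(garbage); // Hypothetical aggregation hook for the task result.
}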
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project.
From the class VisorFindAndDeleteGarbageInPersistenceClosure, method processPartition:
/**
 * @param grpCtx Group context.
 * @param part Local partition.
 * @return Per-group map of stopped cache ids to the number of their rows still present in the partition.
 */
private Map<Integer, Map<Integer, Long>> processPartition(CacheGroupContext grpCtx, GridDhtLocalPartition part) {
    // Reservation prevents the partition from being evicted while it is scanned.
    if (!part.reserve())
        return Collections.emptyMap();

    Map<Integer, Map<Integer, Long>> stoppedCachesForGrpId = new HashMap<>();

    try {
        if (part.state() != GridDhtPartitionState.OWNING)
            return Collections.emptyMap();

        GridIterator<CacheDataRow> it = grpCtx.offheap().partitionIterator(part.id());

        while (it.hasNextX()) {
            CacheDataRow row = it.nextX();

            // cacheId == 0 means the group holds a single cache and rows are not tagged with a
            // cache id, so there can be no garbage left by stopped caches.
            if (row.cacheId() == 0)
                break;

            int cacheId = row.cacheId();

            GridCacheContext cacheCtx = grpCtx.shared().cacheContext(row.cacheId());

            // A row whose cache context is missing belongs to a stopped cache: count it as garbage.
            if (cacheCtx == null)
                stoppedCachesForGrpId.computeIfAbsent(grpCtx.groupId(), (x) -> new HashMap<>())
                    .compute(cacheId, (x, y) -> y == null ? 1 : y + 1);
        }
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Failed to process partition [grpId=" + grpCtx.groupId() + ", partId=" + part.id() + "]", e);

        return Collections.emptyMap();
    }
    finally {
        part.release();
    }

    processedPartitions.incrementAndGet();

    printProgressIfNeeded();

    return stoppedCachesForGrpId;
}
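The reserve / state-check / release sequence above is the standard guard for reading a local partition's data. Distilled to a skeleton that reuses only the calls from the method above (doWork is a placeholder, and the enclosing method is assumed to handle or declare IgniteCheckedException):

// Sketch: safe read access to a local partition's rows.
if (!part.reserve())
    return; // Partition is concurrently being cleared or evicted, skip it.

try {
    if (part.state() != GridDhtPartitionState.OWNING)
        return; // Reserved, but this node no longer owns the partition.

    GridIterator<CacheDataRow> it = grpCtx.offheap().partitionIterator(part.id());

    while (it.hasNextX())
        doWork(it.nextX()); // doWork(...) is a hypothetical row consumer.
}
finally {
    part.release(); // Always release the reservation.
}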
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project.
From the class CacheRentingStateRepairTest, method testRentingStateRepairAfterRestart:
/**
 * Tests partition is properly evicted when node is restarted in the middle of the eviction.
 */
@Test
public void testRentingStateRepairAfterRestart() throws Exception {
    try {
        IgniteEx g0 = startGrid(0);

        g0.cluster().baselineAutoAdjustEnabled(false);

        startGrid(1);

        g0.cluster().active(true);
        awaitPartitionMapExchange();

        List<Integer> parts = evictingPartitionsAfterJoin(g0, g0.cache(DEFAULT_CACHE_NAME), 20);

        int delayEvictPart = parts.get(0);

        int k = 0;

        while (g0.affinity(DEFAULT_CACHE_NAME).partition(k) != delayEvictPart)
            k++;

        g0.cache(DEFAULT_CACHE_NAME).put(k, k);

        GridDhtPartitionTopology top = dht(g0.cache(DEFAULT_CACHE_NAME)).topology();

        GridDhtLocalPartition part = top.localPartition(delayEvictPart);
        assertNotNull(part);

        // Prevent eviction.
        part.reserve();

        startGrid(2);
        g0.cluster().setBaselineTopology(3);

        // Wait until all is evicted except first partition.
        assertTrue("Failed to wait for partition eviction: reservedPart=" + part.id() +
            ", otherParts=" + top.localPartitions().stream()
                .map(p -> "[id=" + p.id() + ", state=" + p.state() + ']')
                .collect(Collectors.toList()),
            waitForCondition(() -> {
                for (int i = 0; i < parts.size(); i++) {
                    if (delayEvictPart == i)
                        // Skip reserved partition.
                        continue;

                    Integer p = parts.get(i);

                    @Nullable GridDhtLocalPartition locPart = top.localPartition(p);
                    assertNotNull(locPart);

                    if (locPart.state() != GridDhtPartitionState.EVICTED)
                        return false;
                }

                return true;
            }, 5000));

        /*
         * Force renting state before node stop.
         * This also could be achieved by stopping node just after RENTING state is set.
         */
        part.setState(GridDhtPartitionState.RENTING);
        assertEquals(GridDhtPartitionState.RENTING, part.state());

        stopGrid(0);

        g0 = startGrid(0);
        awaitPartitionMapExchange();

        part = dht(g0.cache(DEFAULT_CACHE_NAME)).topology().localPartition(delayEvictPart);
        assertNotNull(part);

        final GridDhtLocalPartition finalPart = part;

        CountDownLatch evictLatch = new CountDownLatch(1);

        part.rent().listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
            @Override public void apply(IgniteInternalFuture<?> fut) {
                assertEquals(GridDhtPartitionState.EVICTED, finalPart.state());

                evictLatch.countDown();
            }
        });

        assertTrue("Failed to wait for partition eviction after restart",
            evictLatch.await(5_000, TimeUnit.MILLISECONDS));

        awaitPartitionMapExchange(true, true, null);
    }
    finally {
        stopAllGrids();
    }
}
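The last step of the test, waiting for the partition to finish eviction through rent(), is a reusable pattern on its own. A condensed sketch, assuming a reference part to a GridDhtLocalPartition and using only the calls that appear in the test above:

// Sketch: block until a local partition has been fully evicted.
CountDownLatch evicted = new CountDownLatch(1);

// rent() returns a future that completes once the partition has been evicted
// (the test above asserts the EVICTED state inside the listener).
part.rent().listen(new IgniteInClosure<IgniteInternalFuture<?>>() {
    @Override public void apply(IgniteInternalFuture<?> fut) {
        evicted.countDown();
    }
});

assertTrue("Partition was not evicted in time", evicted.await(5_000, TimeUnit.MILLISECONDS));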
Use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in the Apache Ignite project.
From the class WalRecoveryTxLogicalRecordsTest, method getFreeListData:
/**
 * @param ignite Node.
 * @param cacheName Cache name.
 * @return Cache free lists data (partition number to map of buckets to tails and buckets size).
 */
private Map<Integer, T2<Map<Integer, long[]>, int[]>> getFreeListData(Ignite ignite, String cacheName) throws IgniteCheckedException {
    GridCacheProcessor cacheProc = ((IgniteEx)ignite).context().cache();

    GridCacheContext ctx = cacheProc.cache(cacheName).context();

    List<GridDhtLocalPartition> parts = ctx.topology().localPartitions();

    assertTrue(!parts.isEmpty());
    assertEquals(ctx.affinity().partitions(), parts.size());

    Map<Integer, T2<Map<Integer, long[]>, int[]>> res = new HashMap<>();

    boolean foundNonEmpty = false;
    boolean foundTails = false;

    cacheProc.context().database().checkpointReadLock();

    try {
        for (GridDhtLocalPartition part : parts) {
            AbstractFreeList freeList = (AbstractFreeList)part.dataStore().rowStore().freeList();

            if (freeList == null)
                // Lazy store.
                continue;

            // Flush free-list onheap cache to page memory.
            freeList.saveMetadata(IoStatisticsHolderNoOp.INSTANCE);

            AtomicReferenceArray<PagesList.Stripe[]> buckets = getFieldValue(freeList, AbstractFreeList.class, "buckets");
            AtomicLongArray bucketsSize = getFieldValue(freeList, PagesList.class, "bucketsSize");

            assertNotNull(buckets);
            assertNotNull(bucketsSize);
            assertTrue(buckets.length() > 0);
            assertEquals(bucketsSize.length(), buckets.length());

            Map<Integer, long[]> tailsPerBucket = new HashMap<>();

            for (int i = 0; i < buckets.length(); i++) {
                PagesList.Stripe[] tails = buckets.get(i);

                long[] ids = null;

                if (tails != null) {
                    ids = new long[tails.length];

                    for (int j = 0; j < tails.length; j++)
                        ids[j] = tails[j].tailId;
                }

                tailsPerBucket.put(i, ids);

                if (tails != null) {
                    assertTrue(tails.length > 0);

                    foundTails = true;
                }
            }

            int[] cntsPerBucket = new int[bucketsSize.length()];

            for (int i = 0; i < bucketsSize.length(); i++) {
                cntsPerBucket[i] = (int)bucketsSize.get(i);

                if (cntsPerBucket[i] > 0)
                    foundNonEmpty = true;
            }

            res.put(part.id(), new T2<>(tailsPerBucket, cntsPerBucket));
        }
    }
    finally {
        cacheProc.context().database().checkpointReadUnlock();
    }

    assertTrue(foundNonEmpty);
    assertTrue(foundTails);

    return res;
}
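The detail worth keeping from this helper is the locking discipline: per-partition free-list internals are read under the database checkpoint read lock, and saveMetadata(...) is called first so the on-heap free-list cache is flushed to page memory before it is inspected. A minimal skeleton of that pattern, reusing only the calls from the method above (inspect is a hypothetical consumer, and the enclosing method is assumed to declare IgniteCheckedException):

// Sketch: read per-partition free-list metadata under the checkpoint read lock.
cacheProc.context().database().checkpointReadLock();

try {
    for (GridDhtLocalPartition part : ctx.topology().localPartitions()) {
        AbstractFreeList freeList = (AbstractFreeList)part.dataStore().rowStore().freeList();

        if (freeList == null)
            continue; // Lazily created store, nothing to inspect yet.

        // Flush the on-heap free-list cache to page memory before reading its metadata.
        freeList.saveMetadata(IoStatisticsHolderNoOp.INSTANCE);

        inspect(part.id(), freeList); // inspect(...) is a hypothetical consumer.
    }
}
finally {
    cacheProc.context().database().checkpointReadUnlock();
}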