use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
the class SnapshotFutureTask method onMarkCheckpointBegin.
/**
* {@inheritDoc}
*/
@Override
public void onMarkCheckpointBegin(Context ctx) {
// The write lock is held. Partition page counters have been collected under the write lock.
if (stopping())
return;
try {
for (Map.Entry<Integer, Set<Integer>> e : parts.entrySet()) {
int grpId = e.getKey();
Set<Integer> grpParts = e.getValue();
CacheGroupContext gctx = cctx.cache().cacheGroup(grpId);
Iterator<GridDhtLocalPartition> iter;
if (grpParts == null)
iter = gctx.topology().currentLocalPartitions().iterator();
else {
if (grpParts.contains(INDEX_PARTITION)) {
throw new IgniteCheckedException("Index partition cannot be included into snapshot if " + " set of cache group partitions has been explicitly provided [grpId=" + grpId + ']');
}
iter = F.iterator(grpParts, gctx.topology()::localPartition, false);
}
Set<Integer> owning = new HashSet<>();
Set<Integer> missed = new HashSet<>();
// Iterate over partitions in particular cache group.
while (iter.hasNext()) {
GridDhtLocalPartition part = iter.next();
// Partitions may be in MOVING or RENTING states, or may not have been created yet because no data is assigned to them; only partitions in OWNING state are snapshotted, the rest are recorded as missed.
if (part.state() == GridDhtPartitionState.OWNING)
owning.add(part.id());
else
missed.add(part.id());
}
boolean affNode = gctx.nodeFilter() == null || gctx.nodeFilter().apply(cctx.localNode());
if (grpParts != null) {
// Exit with an error.
if (!missed.isEmpty()) {
throw new IgniteCheckedException("Snapshot operation cancelled due to " + "not all of requested partitions has OWNING state on local node [grpId=" + grpId + ", missed=" + S.compact(missed) + ']');
}
} else {
// No explicit partition set was provided for the cache group, so the index partition must be included into the snapshot if all local partitions are in OWNING state.
if (!missed.isEmpty()) {
log.warning("All local cache group partitions in OWNING state have been included into a snapshot. " + "Partitions which have different states skipped. Index partitions has also been skipped " + "[snpName=" + snpName + ", grpId=" + grpId + ", missed=" + S.compact(missed) + ']');
} else if (affNode && missed.isEmpty() && cctx.kernalContext().query().moduleEnabled())
owning.add(INDEX_PARTITION);
}
processed.put(grpId, owning);
}
List<CacheConfiguration<?, ?>> ccfgs = new ArrayList<>();
for (Map.Entry<Integer, Set<Integer>> e : processed.entrySet()) {
int grpId = e.getKey();
CacheGroupContext gctx = cctx.cache().cacheGroup(grpId);
if (gctx == null)
throw new IgniteCheckedException("Cache group is stopped : " + grpId);
ccfgs.add(gctx.config());
addPartitionWriters(grpId, e.getValue(), FilePageStoreManager.cacheDirName(gctx.config()));
}
if (withMetaStorage) {
processed.put(MetaStorage.METASTORAGE_CACHE_ID, MetaStorage.METASTORAGE_PARTITIONS);
addPartitionWriters(MetaStorage.METASTORAGE_CACHE_ID, MetaStorage.METASTORAGE_PARTITIONS, MetaStorage.METASTORAGE_DIR_NAME);
}
pageStore.readConfigurationFiles(ccfgs, (ccfg, ccfgFile) -> ccfgSndrs.add(new CacheConfigurationSender(ccfg.getName(), FilePageStoreManager.cacheDirName(ccfg), ccfgFile)));
} catch (IgniteCheckedException e) {
acceptException(e);
}
}
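Stripped of the snapshot-specific bookkeeping, the loop above reduces to iterating a cache group's local partitions and keeping only those in OWNING state. Below is a minimal sketch of that filtering step, assuming a CacheGroupContext is already available; the helper name owningPartitionIds is illustrative and not part of SnapshotFutureTask.
import java.util.HashSet;
import java.util.Set;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;
/** Illustrative helper: collects ids of local partitions currently in OWNING state. */
static Set<Integer> owningPartitionIds(CacheGroupContext gctx) {
    Set<Integer> owning = new HashSet<>();
    for (GridDhtLocalPartition part : gctx.topology().currentLocalPartitions()) {
        // Only partitions that own their data are candidates for the snapshot.
        if (part.state() == GridDhtPartitionState.OWNING)
            owning.add(part.id());
    }
    return owning;
}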
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
the class CacheRentingStateRepairTest method testRebalanceRentingPartitionAndNodeJoin.
/**
* @param client {@code True} for client node join.
* @param delay Delay.
*
* @throws Exception if failed.
*/
private void testRebalanceRentingPartitionAndNodeJoin(boolean client, long delay) throws Exception {
try {
IgniteEx g0 = startGrids(2);
g0.cluster().baselineAutoAdjustEnabled(false);
g0.cluster().active(true);
awaitPartitionMapExchange();
List<Integer> parts = evictingPartitionsAfterJoin(g0, g0.cache(DEFAULT_CACHE_NAME), 20);
int delayEvictPart = parts.get(0);
List<Integer> keys = partitionKeys(g0.cache(DEFAULT_CACHE_NAME), delayEvictPart, 2_000, 0);
for (Integer key : keys) g0.cache(DEFAULT_CACHE_NAME).put(key, key);
GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) dht(g0.cache(DEFAULT_CACHE_NAME)).topology();
GridDhtLocalPartition part = top.localPartition(delayEvictPart);
assertNotNull(part);
// Prevent the partition from being evicted while the new node joins and the baseline changes.
part.reserve();
startGrid(2);
resetBaselineTopology();
part.release();
// Wait for eviction. Same could be achieved by calling awaitPartitionMapExchange(true, true, null, true);
part.rent().get();
CountDownLatch l1 = new CountDownLatch(1);
CountDownLatch l2 = new CountDownLatch(1);
// Create race between processing of final supply message and partition clearing.
// Evicted partition will be recreated using supplied factory.
top.partitionFactory((ctx, grp, id, recovery) -> id != delayEvictPart ? new GridDhtLocalPartition(ctx, grp, id, recovery) : new GridDhtLocalPartition(ctx, grp, id, recovery) {
@Override
public void beforeApplyBatch(boolean last) {
if (last) {
l1.countDown();
U.awaitQuiet(l2);
// Delay rebalance finish to enforce race with clearing.
if (delay > 0)
doSleep(delay);
}
}
});
stopGrid(2);
// Trigger rebalance for delayEvictPart after eviction.
resetBaselineTopology();
IgniteInternalFuture<?> fut = multithreadedAsync(new Runnable() {
@Override
public void run() {
try {
l1.await();
// Trigger partition clear on next topology version.
if (client)
startClientGrid(CLIENT);
else
startGrid(2);
// Finish partition rebalance after initiating clear.
l2.countDown();
} catch (Exception e) {
fail(X.getFullStackTrace(e));
}
}
}, 1);
fut.get();
awaitPartitionMapExchange(true, true, null, true);
assertPartitionsSame(idleVerify(g0));
} finally {
stopAllGrids();
}
}
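The GridDhtLocalPartition calls that drive this test are reserve(), which pins the partition so it cannot be evicted, release(), which drops that pin, and rent(), whose future completes once eviction has finished. Below is a minimal sketch of that sequence, assuming a GridDhtPartitionTopologyImpl reference and a partition id are at hand; the helper name is illustrative.
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopologyImpl;
/** Illustrative: pins a partition during a topology change, then waits for its eviction. */
static void reserveThenAwaitEviction(GridDhtPartitionTopologyImpl top, int partId) throws Exception {
    GridDhtLocalPartition part = top.localPartition(partId);
    // Pin the partition so that rebalancing / baseline changes cannot evict it yet.
    part.reserve();
    try {
        // ... change the topology here while the partition is reserved ...
    }
    finally {
        part.release();
    }
    // Once the reservation is dropped, wait until the partition is actually evicted.
    part.rent().get();
}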
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
the class GridCacheRebalancingWithAsyncClearingTest method testPartitionClearingNotBlockExchange.
/**
* Test that partition clearing doesn't block partitions map exchange.
*
* @throws Exception If failed.
*/
@Test
@WithSystemProperty(key = IGNITE_PDS_MAX_CHECKPOINT_MEMORY_HISTORY_SIZE, value = "1")
public void testPartitionClearingNotBlockExchange() throws Exception {
IgniteEx ig = (IgniteEx) startGrids(3);
ig.cluster().active(true);
// High number of keys triggers long partition eviction.
final int keysCnt = SF.applyLB(300_000, 10_000);
try (IgniteDataStreamer<Integer, Integer> ds = ig.dataStreamer(CACHE_NAME)) {
log.info("Writing initial data...");
ds.allowOverwrite(true);
for (int k = 1; k <= keysCnt; k++) {
ds.addData(k, k);
if (k % 10_000 == 0)
log.info("Written " + k + " entities.");
}
log.info("Writing initial data finished.");
}
stopGrid(2);
awaitPartitionMapExchange();
try (IgniteDataStreamer<Integer, Integer> ds = ig.dataStreamer(CACHE_NAME)) {
log.info("Writing external data...");
ds.allowOverwrite(true);
for (int k = 1; k <= keysCnt; k++) {
ds.addData(k, 2 * k);
if (k % 10_000 == 0)
log.info("Written " + k + " entities.");
}
log.info("Writing external data finished.");
}
IgniteCache<Integer, Integer> cache = ig.cache(CACHE_NAME);
forceCheckpoint();
GridCachePartitionExchangeManager exchangeManager = ig.cachex(CACHE_NAME).context().shared().exchange();
long topVer = exchangeManager.lastTopologyFuture().topologyVersion().topologyVersion();
startGrid(2);
// Check that the exchange future is completed and the topology version is incremented.
GridDhtPartitionsExchangeFuture fut1 = exchangeManager.lastTopologyFuture();
fut1.get();
Assert.assertEquals(topVer + 1, fut1.topologyVersion().topologyVersion());
// Check that the additional exchange didn't affect asynchronous partition eviction.
boolean asyncClearingIsRunning = false;
for (int p = 0; p < PARTITIONS_CNT; p++) {
GridDhtLocalPartition part = grid(2).cachex(CACHE_NAME).context().topology().localPartition(p);
if (part != null && part.state() == GridDhtPartitionState.MOVING && part.isClearing()) {
asyncClearingIsRunning = true;
break;
}
}
Assert.assertTrue("Async clearing is not running at the moment", asyncClearingIsRunning);
// Check that stopping & starting node didn't break rebalance process.
stopGrid(1);
startGrid(1);
// Wait for rebalance on all nodes.
for (Ignite ignite : G.allGrids()) ignite.cache(CACHE_NAME).rebalance().get();
// Check no data loss.
for (int k = 1; k <= keysCnt; k++) {
Integer val = cache.get(k);
Assert.assertNotNull("Value for " + k + " is null", val);
Assert.assertEquals("Check failed for " + k + " " + val, 2 * k, (int) val);
}
}
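The probe for asynchronous clearing above comes down to finding a local partition that is both in MOVING state and currently clearing. Below is a minimal sketch of that check, assuming a GridDhtPartitionTopology reference and a known partition count; the helper name is illustrative.
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;
/** Illustrative: returns true if any local partition is being cleared asynchronously. */
static boolean anyPartitionClearing(GridDhtPartitionTopology top, int partsCnt) {
    for (int p = 0; p < partsCnt; p++) {
        GridDhtLocalPartition part = top.localPartition(p);
        // A MOVING partition with a pending clearing is being cleared in the background.
        if (part != null && part.state() == GridDhtPartitionState.MOVING && part.isClearing())
            return true;
    }
    return false;
}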
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
the class NoneRebalanceModeSelfTest method testRemoveAll.
/**
* @throws Exception If failed.
*/
@Test
public void testRemoveAll() throws Exception {
GridNearTransactionalCache cache = (GridNearTransactionalCache) ((IgniteKernal) grid(0)).internalCache(DEFAULT_CACHE_NAME);
for (GridDhtLocalPartition part : cache.dht().topology().localPartitions()) assertEquals(OWNING, part.state());
grid(0).cache(DEFAULT_CACHE_NAME).removeAll();
}
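The assertion above generalizes to a small predicate over topology().localPartitions(). A minimal sketch, assuming a GridDhtPartitionTopology reference; the helper name is illustrative.
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionState;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtPartitionTopology;
/** Illustrative: true if every local partition is in the expected state. */
static boolean allLocalPartitionsIn(GridDhtPartitionTopology top, GridDhtPartitionState expected) {
    for (GridDhtLocalPartition part : top.localPartitions()) {
        if (part.state() != expected)
            return false;
    }
    return true;
}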
use of org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition in project ignite by apache.
the class GridCacheRebalancingPartitionCountersTest method checkUpdCounter.
/**
* Compares update counters of the node's local partitions against the values previously recorded
* in {@code partMap} and adds any mismatch (or zero counter) to {@code issues}.
*
* @param ignite Node to check.
* @param issues Collection of detected discrepancies.
* @param partMap Mapping from partition id to the previously recorded update counter.
*/
private void checkUpdCounter(IgniteEx ignite, List<String> issues, HashMap<Integer, Long> partMap) {
final CacheGroupContext grpCtx = ignite.context().cache().cacheGroup(CU.cacheId(CACHE_NAME));
assertNotNull(grpCtx);
GridDhtPartitionTopologyImpl top = (GridDhtPartitionTopologyImpl) grpCtx.topology();
List<GridDhtLocalPartition> locParts = top.localPartitions();
for (GridDhtLocalPartition part : locParts) {
Long cnt = partMap.get(part.id());
if (cnt == null)
partMap.put(part.id(), part.updateCounter());
if ((cnt != null && part.updateCounter() != cnt) || part.updateCounter() == 0)
issues.add("Node name " + ignite.name() + "Part = " + part.id() + " updCounter " + part.updateCounter());
}
}
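At its core the helper reads GridDhtLocalPartition#updateCounter() for every local partition of the cache group. Below is a minimal sketch that snapshots those counters into a map, assuming a CacheGroupContext reference; the helper name is illustrative.
import java.util.HashMap;
import java.util.Map;
import org.apache.ignite.internal.processors.cache.CacheGroupContext;
import org.apache.ignite.internal.processors.cache.distributed.dht.topology.GridDhtLocalPartition;
/** Illustrative: snapshot of partition id to update counter for a cache group's local partitions. */
static Map<Integer, Long> updateCounters(CacheGroupContext grpCtx) {
    Map<Integer, Long> cntrs = new HashMap<>();
    for (GridDhtLocalPartition part : grpCtx.topology().localPartitions())
        cntrs.put(part.id(), part.updateCounter());
    return cntrs;
}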