Use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project gridgain by gridgain.
The class IgnitePdsDestroyCacheTest, method doTestDestroyCacheNotThrowsOOM.
/**
 * @param loc If {@code true}, the cache is closed locally instead of being destroyed.
 * @throws Exception If failed.
 */
public void doTestDestroyCacheNotThrowsOOM(boolean loc) throws Exception {
    Field batchField = U.findField(IgniteCacheOffheapManagerImpl.class, "BATCH_SIZE");

    int batchSize = batchField.getInt(null);

    int pageSize = 1024;
    int partitions = 32;

    DataStorageConfiguration ds = new DataStorageConfiguration()
        .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
            .setMaxSize(batchSize * pageSize * partitions)
            .setPersistenceEnabled(true))
        .setPageSize(pageSize);

    int payLoadSize = pageSize * 3 / 4;

    IgniteConfiguration cfg = getConfiguration().setDataStorageConfiguration(ds);

    final IgniteEx ignite = startGrid(optimize(cfg));

    ignite.cluster().active(true);

    startGroupCachesDynamically(ignite, loc);

    PageMemoryEx pageMemory = (PageMemoryEx)ignite.cachex(cacheName(0)).context().dataRegion().pageMemory();

    IgniteInternalFuture<?> ldrFut = runAsync(() -> {
        IgniteCache<Object, byte[]> c1 = ignite.cache(cacheName(0));

        long totalPages = pageMemory.totalPages();

        for (int i = 0; i <= totalPages; i++)
            c1.put(i, new byte[payLoadSize]);
    });

    CountDownLatch cpStart = new CountDownLatch(1);

    GridCacheDatabaseSharedManager dbMgr =
        (GridCacheDatabaseSharedManager)ignite.context().cache().context().database();

    CheckpointListener lsnr = new CheckpointListener() {
        @Override public void onMarkCheckpointBegin(Context ctx) {
            /* No-op. */
        }

        @Override public void onCheckpointBegin(Context ctx) {
            cpStart.countDown();
        }

        @Override public void beforeCheckpointBegin(Context ctx) {
            /* No-op. */
        }
    };

    dbMgr.addCheckpointListener(lsnr);

    ldrFut.get();

    cpStart.await();

    dbMgr.removeCheckpointListener(lsnr);

    IgniteInternalFuture<?> delFut = runAsync(() -> {
        if (loc)
            ignite.cache(cacheName(0)).close();
        else
            ignite.destroyCache(cacheName(0));
    });

    delFut.get(20_000);
}
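Stripped of the memory-pressure setup, the snippet above reduces to a register/await/remove pattern against GridCacheDatabaseSharedManager. A minimal sketch of that pattern, assuming an already started IgniteEx named ignite and a test method declared with throws Exception (variable names here are illustrative, not taken from the project):
GridCacheDatabaseSharedManager dbMgr =
    (GridCacheDatabaseSharedManager)ignite.context().cache().context().database();

CountDownLatch cpStart = new CountDownLatch(1);

CheckpointListener lsnr = new CheckpointListener() {
    @Override public void beforeCheckpointBegin(Context ctx) {
        /* No-op. */
    }

    @Override public void onMarkCheckpointBegin(Context ctx) {
        /* No-op. */
    }

    @Override public void onCheckpointBegin(Context ctx) {
        // Signal that a checkpoint has started.
        cpStart.countDown();
    }
};

dbMgr.addCheckpointListener(lsnr);

try {
    cpStart.await();
}
finally {
    // Always deregister, otherwise the listener keeps firing on every subsequent checkpoint.
    dbMgr.removeCheckpointListener(lsnr);
}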
Use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project gridgain by gridgain.
The class CheckpointListenerForRegionTest, method checkpointListenerWatcher.
/**
 * Adds a checkpoint listener that counts the number of listener calls during each checkpoint.
 *
 * @param db Shared database manager used to register the listener.
 * @param defaultRegion Region for which the listener should be added.
 * @return Counter of the listener calls.
 */
@NotNull
private AtomicInteger checkpointListenerWatcher(GridCacheDatabaseSharedManager db, DataRegion defaultRegion) {
    AtomicInteger checkpointListenerCounter = new AtomicInteger();

    db.addCheckpointListener(new CheckpointListener() {
        @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
            checkpointListenerCounter.getAndIncrement();
        }

        @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
            checkpointListenerCounter.getAndIncrement();
        }

        @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
            checkpointListenerCounter.getAndIncrement();
        }
    }, defaultRegion);

    return checkpointListenerCounter;
}
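Since all three callbacks increment the same counter, each checkpoint that covers the given region is expected to advance it by three. A hypothetical usage inside a test method (db, defaultRegion, forceCheckpoint and assertEquals are assumed to come from the surrounding test class):
AtomicInteger cpLsnrCntr = checkpointListenerWatcher(db, defaultRegion);

// Assuming no checkpoint has run since registration, no callbacks were invoked yet.
assertEquals(0, cpLsnrCntr.get());

forceCheckpoint();

// beforeCheckpointBegin, onMarkCheckpointBegin and onCheckpointBegin are each expected to fire once.
assertEquals(3, cpLsnrCntr.get());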
Use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project gridgain by gridgain.
The class IgniteWalRebalanceTest, method testRebalanceReassignAndOwnPartitions.
/**
 * Tests that owning partitions (triggered by a rebalance future) cannot be mapped to a new rebalance future
 * that was created by RebalanceReassignExchangeTask.
 *
 * @throws Exception If failed.
 */
@Test
public void testRebalanceReassignAndOwnPartitions() throws Exception {
    backups = 3;

    IgniteEx supplier1 = startGrid(0);
    IgniteEx supplier2 = startGrid(1);
    IgniteEx demander = startGrid(2);

    supplier1.cluster().state(ACTIVE);

    String cacheName1 = "test-cache-1";
    String cacheName2 = "test-cache-2";

    IgniteCache<Integer, IndexedObject> c1 = supplier1.getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName1)
            .setBackups(backups)
            .setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT))
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
            .setRebalanceOrder(10));

    IgniteCache<Integer, IndexedObject> c2 = supplier1.getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName2)
            .setBackups(backups)
            .setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT))
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
            .setRebalanceOrder(20));

    // Fill initial data.
    final int entryCnt = PARTS_CNT * 200;
    final int preloadEntryCnt = PARTS_CNT * 400;

    int val = 0;

    for (int k = 0; k < preloadEntryCnt; k++) {
        c1.put(k, new IndexedObject(val++));
        c2.put(k, new IndexedObject(val++));
    }

    forceCheckpoint();

    stopGrid(2);

    // This is an easy way to emulate missing partitions on the first rebalance.
    for (int i = 0; i < entryCnt; i++)
        c1.put(i, new IndexedObject(val++));

    // Full rebalance for cacheName2.
    for (int i = 0; i < preloadEntryCnt; i++)
        c2.put(i, new IndexedObject(val++));

    // Delay the rebalance process for the specified groups.
    blockMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            return msg0.groupId() == CU.cacheId(cacheName1) || msg0.groupId() == CU.cacheId(cacheName2);
        }

        return false;
    };

    // Emulate missing partitions and trigger RebalanceReassignExchangeTask, which should re-trigger a new rebalance.
    FailingIOFactory ioFactory = injectFailingIOFactory(supplier1);

    demander = startGrid(2);

    TestRecordingCommunicationSpi demanderSpi = TestRecordingCommunicationSpi.spi(grid(2));

    // Wait until the demander starts rebalancing.
    demanderSpi.waitForBlocked();

    // A client node needs to be started in order to block RebalanceReassignExchangeTask (and not change the affinity)
    // until cacheName2 triggers a checkpoint after rebalancing.
    CountDownLatch blockClientJoin = new CountDownLatch(1);
    CountDownLatch unblockClientJoin = new CountDownLatch(1);

    demander.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
        @Override public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
            blockClientJoin.countDown();

            try {
                if (!unblockClientJoin.await(getTestTimeout(), MILLISECONDS))
                    throw new IgniteException("Failed to wait for the client node joining the cluster.");
            }
            catch (InterruptedException e) {
                throw new IgniteException("Unexpected exception.", e);
            }
        }
    });

    startClientGrid(4);

    // Wait for a checkpoint after rebalancing cacheName2.
    CountDownLatch blockCheckpoint = new CountDownLatch(1);
    CountDownLatch unblockCheckpoint = new CountDownLatch(1);

    ((GridCacheDatabaseSharedManager)demander.context().cache().context().database()).addCheckpointListener(new CheckpointListener() {
        /** {@inheritDoc} */
        @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
            if (!ctx.progress().reason().contains(String.valueOf(CU.cacheId(cacheName2))))
                return;

            blockCheckpoint.countDown();

            try {
                if (!unblockCheckpoint.await(getTestTimeout(), MILLISECONDS))
                    throw new IgniteCheckedException("Failed to wait for unblocking the checkpointer.");
            }
            catch (InterruptedException e) {
                throw new IgniteCheckedException("Unexpected exception.", e);
            }
        }

        /** {@inheritDoc} */
        @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
            /* No-op. */
        }

        /** {@inheritDoc} */
        @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
            /* No-op. */
        }
    });

    // Unblock the first rebalance.
    demanderSpi.stopBlock();

    // Wait for the start of the checkpoint after rebalancing cacheName2.
    assertTrue("Failed to wait for checkpoint.", blockCheckpoint.await(getTestTimeout(), MILLISECONDS));

    // Block the second rebalance.
    demanderSpi.blockMessages((node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            return msg0.groupId() == CU.cacheId(cacheName1);
        }

        return false;
    });

    ioFactory.reset();

    // Unblock the client exchange and, therefore, the handling of RebalanceReassignExchangeTask,
    // which is already scheduled.
    unblockClientJoin.countDown();

    // Wait for the second rebalance to start (a new chain of rebalance futures should be created at this point).
    demanderSpi.waitForBlocked();

    GridFutureAdapter checkpointFut = ((GridCacheDatabaseSharedManager)demander.context().cache().context().database())
        .getCheckpointer().currentProgress().futureFor(FINISHED);

    // Unblock the checkpointer.
    unblockCheckpoint.countDown();

    assertTrue("Failed to wait for a checkpoint.",
        GridTestUtils.waitForCondition(() -> checkpointFut.isDone(), getTestTimeout()));

    // There is a race between unblocking the rebalance and the current checkpoint executing all its listeners.
    demanderSpi.stopBlock();

    awaitPartitionMapExchange(false, true, null);
}
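The checkpoint listener in this test is essentially a gate that parks one specific checkpoint, identified by its reason string, until the test releases it. A reusable sketch of that idea, built only on the CheckpointListener API shown in this section (the class name and fields are illustrative, not part of the project):
class BlockingCheckpointGate implements CheckpointListener {
    /** Released when the gated checkpoint has been reached. */
    final CountDownLatch reached = new CountDownLatch(1);

    /** Counted down by the test to let the gated checkpoint proceed. */
    final CountDownLatch released = new CountDownLatch(1);

    /** Fragment expected in the checkpoint reason, e.g. a cache group id. */
    private final String reasonFragment;

    BlockingCheckpointGate(String reasonFragment) {
        this.reasonFragment = reasonFragment;
    }

    @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
        // Only gate the checkpoint whose reason mentions the expected fragment.
        if (!ctx.progress().reason().contains(reasonFragment))
            return;

        reached.countDown();

        try {
            released.await();
        }
        catch (InterruptedException e) {
            throw new IgniteCheckedException("Interrupted while the checkpoint was gated.", e);
        }
    }

    @Override public void beforeCheckpointBegin(Context ctx) {
        /* No-op. */
    }

    @Override public void onMarkCheckpointBegin(Context ctx) {
        /* No-op. */
    }
}
A test would register the gate via addCheckpointListener, await reached, perform whatever must happen while the checkpoint is parked, and finally count down released.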
Use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project gridgain by gridgain.
The class PageMemoryTracker, method start.
/**
 * Starts tracking pages.
 */
synchronized void start() {
    if (!isEnabled() || started)
        return;

    pageSize = ctx.igniteConfiguration().getDataStorageConfiguration().getPageSize();

    pageMemoryMock = mockPageMemory();

    GridCacheSharedContext sharedCtx = gridCtx.cache().context();

    // Initialize one memory region for all data regions of the target Ignite node.
    long maxMemorySize = 0;

    for (DataRegion dataRegion : sharedCtx.database().dataRegions()) {
        if (dataRegion.pageMemory() instanceof PageMemoryImpl)
            maxMemorySize += dataRegion.config().getMaxSize();
    }

    long[] chunks = new long[] {maxMemorySize};

    memoryProvider = new UnsafeMemoryProvider(log);
    memoryProvider.initialize(chunks);

    memoryRegion = memoryProvider.nextRegion();

    maxPages = (int)(maxMemorySize / pageSize);

    pageSlots = new DirectMemoryPageSlot[maxPages];

    freeSlotsCnt = maxPages;

    tmpBuf1 = ByteBuffer.allocateDirect(pageSize);
    tmpBuf2 = ByteBuffer.allocateDirect(pageSize);

    if (cfg.isCheckPagesOnCheckpoint()) {
        checkpointLsnr = new CheckpointListener() {
            @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
                if (!checkPages(false, true))
                    throw new IgniteCheckedException("Page memory is inconsistent after applying WAL delta records.");
            }

            @Override public void beforeCheckpointBegin(Context ctx) {
                /* No-op. */
            }

            @Override public void onCheckpointBegin(Context ctx) {
                /* No-op. */
            }
        };

        ((GridCacheDatabaseSharedManager)gridCtx.cache().context().database()).addCheckpointListener(checkpointLsnr);
    }

    lastPageIdx = 0;

    started = true;

    log.info("PageMemory tracker started, " + U.readableSize(maxMemorySize, false) + " offheap memory allocated.");
}
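For symmetry, the listener registered in start() has to be removed once tracking stops; a hedged sketch of that cleanup (this stop() body is an assumption for illustration, not code from the project):
synchronized void stop() {
    if (!started)
        return;

    // Deregister the checkpoint listener added in start(), if page checks on checkpoint were enabled.
    if (checkpointLsnr != null) {
        ((GridCacheDatabaseSharedManager)gridCtx.cache().context().database())
            .removeCheckpointListener(checkpointLsnr);

        checkpointLsnr = null;
    }

    started = false;
}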
Use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project gridgain by gridgain.
The class DurableBackgroundTasksProcessorSelfTest, method testDontDeleteTaskIfItsRestart.
/**
 * Checks that the task will not be deleted from the MetaStorage if it was restarted.
 *
 * @throws Exception If failed.
 */
@Test
public void testDontDeleteTaskIfItsRestart() throws Exception {
    IgniteEx n = startGrid(0);

    ObservingCheckpointListener observingCpLsnr = new ObservingCheckpointListener();
    dbMgr(n).addCheckpointListener(observingCpLsnr);

    n.cluster().state(ACTIVE);

    CheckpointWorkflow cpWorkflow = checkpointWorkflow(n);

    List<CheckpointListener> cpLs = cpWorkflow.getRelevantCheckpointListeners(dbMgr(n).checkpointedDataRegions());

    assertTrue(cpLs.contains(observingCpLsnr));
    assertTrue(cpLs.contains(durableBackgroundTask(n)));
    assertTrue(cpLs.indexOf(observingCpLsnr) < cpLs.indexOf(durableBackgroundTask(n)));

    SimpleTask simpleTask0 = new SimpleTask("t");
    IgniteInternalFuture<Void> taskFut = durableBackgroundTask(n).executeAsync(simpleTask0, true);

    simpleTask0.onExecFut.get(getTestTimeout());

    forceCheckpoint();

    dbMgr(n).enableCheckpoints(false).get(getTestTimeout());

    simpleTask0.taskFut.onDone(DurableBackgroundTaskResult.complete(null));
    taskFut.get(getTestTimeout());

    SimpleTask simpleTask1 = new SimpleTask("t");

    AtomicReference<IgniteInternalFuture<Void>> taskFutRef = new AtomicReference<>();

    observingCpLsnr.afterCheckpointEndConsumer =
        ctx -> taskFutRef.set(durableBackgroundTask(n).executeAsync(simpleTask1, true));

    dbMgr(n).enableCheckpoints(true).get(getTestTimeout());

    forceCheckpoint();

    assertNotNull(metaStorageOperation(n, ms -> ms.read(metaStorageKey(simpleTask0))));

    simpleTask1.onExecFut.get(getTestTimeout());
    simpleTask1.taskFut.onDone(DurableBackgroundTaskResult.complete(null));

    taskFutRef.get().get(getTestTimeout());

    forceCheckpoint();

    assertNull(metaStorageOperation(n, ms -> ms.read(metaStorageKey(simpleTask1))));
}
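The test relies on an ObservingCheckpointListener helper whose body is not shown here. A plausible sketch of its shape (an assumption, not the project's actual implementation; Consumer is java.util.function.Consumer, and the afterCheckpointEnd override assumes the CheckpointListener contract in this codebase exposes that phase, as the use of afterCheckpointEndConsumer above suggests):
static class ObservingCheckpointListener implements CheckpointListener {
    /** Invoked after a checkpoint completes; the test above plugs the task restart into it. */
    volatile Consumer<Context> afterCheckpointEndConsumer = ctx -> {};

    @Override public void onMarkCheckpointBegin(Context ctx) {
        /* No-op. */
    }

    @Override public void onCheckpointBegin(Context ctx) {
        /* No-op. */
    }

    @Override public void beforeCheckpointBegin(Context ctx) {
        /* No-op. */
    }

    /** Assumed callback for the after-checkpoint phase. */
    @Override public void afterCheckpointEnd(Context ctx) {
        afterCheckpointEndConsumer.accept(ctx);
    }
}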