use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project ignite by apache.
the class MetaStorage method init.
/**
 * Initializes the metastorage. If the legacy metastorage partition id is not preserved, data is
 * copied to a temporary storage while still on the old partition, or restored from it on the new
 * partition, and the old partition files are truncated on the next checkpoint.
 *
 * @param db Database shared manager.
 * @throws IgniteCheckedException If failed.
 */
public void init(GridCacheDatabaseSharedManager db) throws IgniteCheckedException {
    dataRegion.metrics().clear();

    initInternal(db);

    if (!PRESERVE_LEGACY_METASTORAGE_PARTITION_ID) {
        if (partId == OLD_METASTORE_PARTITION)
            db.temporaryMetaStorage(copyDataToTmpStorage());
        else if (db.temporaryMetaStorage() != null) {
            restoreDataFromTmpStorage(db.temporaryMetaStorage());

            db.temporaryMetaStorage(null);

            db.addCheckpointListener(new CheckpointListener() {
                /** {@inheritDoc} */
                @Override public void onMarkCheckpointBegin(Context ctx) {
                }

                /** {@inheritDoc} */
                @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
                    assert cctx.pageStore() != null;

                    int partTag = ((PageMemoryEx)dataRegion.pageMemory()).invalidate(METASTORAGE_CACHE_ID, OLD_METASTORE_PARTITION);
                    cctx.pageStore().truncate(METASTORAGE_CACHE_ID, OLD_METASTORE_PARTITION, partTag);

                    int idxTag = ((PageMemoryEx)dataRegion.pageMemory()).invalidate(METASTORAGE_CACHE_ID, PageIdAllocator.INDEX_PARTITION);
                    PageStore store = ((FilePageStoreManager)cctx.pageStore()).getStore(METASTORAGE_CACHE_ID, PageIdAllocator.INDEX_PARTITION);
                    store.truncate(idxTag);

                    db.removeCheckpointListener(this);
                }

                /** {@inheritDoc} */
                @Override public void beforeCheckpointBegin(Context ctx) {
                }
            }, dataRegion);
        }
    }
}
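For reference, the registration pattern that all snippets on this page share can be reduced to the following sketch. The names db and region stand in for an available GridCacheDatabaseSharedManager and DataRegion; the phase comments describe the usual callback ordering and are illustrative, not authoritative.

// Minimal sketch: register a CheckpointListener scoped to one data region and
// unregister it from a callback once its one-shot work is done.
CheckpointListener lsnr = new CheckpointListener() {
    @Override public void beforeCheckpointBegin(Context ctx) {
        // Invoked before the checkpoint process starts.
    }

    @Override public void onMarkCheckpointBegin(Context ctx) {
        // Invoked while the checkpoint is being marked.
    }

    @Override public void onCheckpointBegin(Context ctx) {
        // Invoked once the checkpoint has started; a one-shot listener can
        // deregister itself here, as MetaStorage.init() does above.
        db.removeCheckpointListener(this);
    }
};

db.addCheckpointListener(lsnr, region);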
use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project ignite by apache.
the class CheckpointListenerForRegionTest method checkpointListenerWatcher.
/**
 * Adds a checkpoint listener which counts the number of listener calls during each checkpoint.
 *
 * @param db Shared manager used to manage the listeners.
 * @param defaultRegion Region for which the listener should be added.
 * @return Counter of the listener calls.
 */
@NotNull
private AtomicInteger checkpointListenerWatcher(GridCacheDatabaseSharedManager db, DataRegion defaultRegion) {
    AtomicInteger checkpointListenerCounter = new AtomicInteger();

    db.addCheckpointListener(new CheckpointListener() {
        @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
            checkpointListenerCounter.getAndIncrement();
        }

        @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
            checkpointListenerCounter.getAndIncrement();
        }

        @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
            checkpointListenerCounter.getAndIncrement();
        }
    }, defaultRegion);

    return checkpointListenerCounter;
}
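A possible way to use the watcher in a test body (hypothetical sketch; it assumes the db and defaultRegion fixtures above, the forceCheckpoint() test helper, and that exactly one checkpoint runs):

AtomicInteger cnt = checkpointListenerWatcher(db, defaultRegion);

// One completed checkpoint should bump the counter three times, once per callback:
// beforeCheckpointBegin, onMarkCheckpointBegin and onCheckpointBegin.
forceCheckpoint();

assertEquals(3, cnt.get());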
use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project ignite by apache.
the class IgniteWalRebalanceTest method testRebalanceReassignAndOwnPartitions.
/**
 * Tests that the owning of partitions (triggered by a rebalance future) cannot be mapped to a new rebalance
 * future that was created by RebalanceReassignExchangeTask.
 *
 * @throws Exception If failed.
 */
@Test
public void testRebalanceReassignAndOwnPartitions() throws Exception {
    backups = 3;

    IgniteEx supplier1 = startGrid(0);
    IgniteEx supplier2 = startGrid(1);
    IgniteEx demander = startGrid(2);

    supplier1.cluster().state(ACTIVE);

    String cacheName1 = "test-cache-1";
    String cacheName2 = "test-cache-2";

    IgniteCache<Integer, IndexedObject> c1 = supplier1.getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName1)
            .setBackups(backups)
            .setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT))
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
            .setRebalanceOrder(10));

    IgniteCache<Integer, IndexedObject> c2 = supplier1.getOrCreateCache(
        new CacheConfiguration<Integer, IndexedObject>(cacheName2)
            .setBackups(backups)
            .setAffinity(new RendezvousAffinityFunction(false, PARTS_CNT))
            .setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC)
            .setRebalanceOrder(20));

    // Fill initial data.
    final int entryCnt = PARTS_CNT * 200;
    final int preloadEntryCnt = PARTS_CNT * 400;

    int val = 0;

    for (int k = 0; k < preloadEntryCnt; k++) {
        c1.put(k, new IndexedObject(val++));
        c2.put(k, new IndexedObject(val++));
    }

    forceCheckpoint();

    stopGrid(2);

    // This is an easy way to emulate missing partitions on the first rebalance.
    for (int i = 0; i < entryCnt; i++)
        c1.put(i, new IndexedObject(val++));

    // Full rebalance for cacheName2.
    for (int i = 0; i < preloadEntryCnt; i++)
        c2.put(i, new IndexedObject(val++));

    // Delay the rebalance process for the specified groups.
    blockMsgPred = (node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            return msg0.groupId() == CU.cacheId(cacheName1) || msg0.groupId() == CU.cacheId(cacheName2);
        }

        return false;
    };

    // Emulate missing partitions and trigger RebalanceReassignExchangeTask, which should re-trigger a new rebalance.
    FailingIOFactory ioFactory = injectFailingIOFactory(supplier1);

    demander = startGrid(2);

    TestRecordingCommunicationSpi demanderSpi = TestRecordingCommunicationSpi.spi(grid(2));

    // Wait until the demander starts rebalancing.
    demanderSpi.waitForBlocked();

    // Need to start a client node in order to block RebalanceReassignExchangeTask (without changing the affinity)
    // until cacheName2 triggers a checkpoint after rebalancing.
    CountDownLatch blockClientJoin = new CountDownLatch(1);
    CountDownLatch unblockClientJoin = new CountDownLatch(1);

    demander.context().cache().context().exchange().registerExchangeAwareComponent(new PartitionsExchangeAware() {
        @Override public void onInitBeforeTopologyLock(GridDhtPartitionsExchangeFuture fut) {
            blockClientJoin.countDown();

            try {
                if (!unblockClientJoin.await(getTestTimeout(), MILLISECONDS))
                    throw new IgniteException("Failed to wait for the client node joining the cluster.");
            }
            catch (InterruptedException e) {
                throw new IgniteException("Unexpected exception.", e);
            }
        }
    });

    startClientGrid(4);

    // Wait for a checkpoint after rebalancing cacheName2.
    CountDownLatch blockCheckpoint = new CountDownLatch(1);
    CountDownLatch unblockCheckpoint = new CountDownLatch(1);

    ((GridCacheDatabaseSharedManager)demander.context().cache().context().database()).addCheckpointListener(new CheckpointListener() {
        /** {@inheritDoc} */
        @Override public void onCheckpointBegin(Context ctx) throws IgniteCheckedException {
            if (!ctx.progress().reason().contains(String.valueOf(CU.cacheId(cacheName2))))
                return;

            blockCheckpoint.countDown();

            try {
                if (!unblockCheckpoint.await(getTestTimeout(), MILLISECONDS))
                    throw new IgniteCheckedException("Failed to wait for unblocking the checkpointer.");
            }
            catch (InterruptedException e) {
                throw new IgniteCheckedException("Unexpected exception", e);
            }
        }

        /** {@inheritDoc} */
        @Override public void beforeCheckpointBegin(Context ctx) throws IgniteCheckedException {
        }

        /** {@inheritDoc} */
        @Override public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
        }
    });

    // Unblock the first rebalance.
    demanderSpi.stopBlock();

    // Wait for the start of the checkpoint after rebalancing cacheName2.
    assertTrue("Failed to wait for checkpoint.", blockCheckpoint.await(getTestTimeout(), MILLISECONDS));

    // Block the second rebalancing.
    demanderSpi.blockMessages((node, msg) -> {
        if (msg instanceof GridDhtPartitionDemandMessage) {
            GridDhtPartitionDemandMessage msg0 = (GridDhtPartitionDemandMessage)msg;

            return msg0.groupId() == CU.cacheId(cacheName1);
        }

        return false;
    });

    ioFactory.reset();

    // Unblock the client exchange and, therefore, the handling of RebalanceReassignExchangeTask,
    // which is already scheduled.
    unblockClientJoin.countDown();

    // Wait for the second rebalance to start (a new chain of rebalance futures should be created at this point).
    demanderSpi.waitForBlocked();

    GridFutureAdapter checkpointFut = ((GridCacheDatabaseSharedManager)demander.context().cache().context().database())
        .getCheckpointer().currentProgress().futureFor(FINISHED);

    // Unblock the checkpointer.
    unblockCheckpoint.countDown();

    assertTrue("Failed to wait for a checkpoint.", GridTestUtils.waitForCondition(() -> checkpointFut.isDone(), getTestTimeout()));

    // There is a race between unblocking the rebalance and the current checkpoint executing all of its listeners.
    demanderSpi.stopBlock();

    awaitPartitionMapExchange(false, true, null);
}
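The block/wait/unblock idiom used above, shown in isolation (a sketch; demanderSpi and cacheName1 are assumed to be set up exactly as in the test):

// Block outgoing demand messages for a single cache group.
demanderSpi.blockMessages((node, msg) -> {
    if (msg instanceof GridDhtPartitionDemandMessage)
        return ((GridDhtPartitionDemandMessage)msg).groupId() == CU.cacheId(cacheName1);

    return false;
});

// Wait until the demander is actually parked on a blocked message.
demanderSpi.waitForBlocked();

// Perform assertions or reconfiguration while rebalancing is paused, then release it.
demanderSpi.stopBlock();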
use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project ignite by apache.
the class IgniteSnapshotManagerSelfTest method testSnapshotAlwaysStartsNewCheckpoint.
/**
 * @throws Exception If failed.
 */
@Test
public void testSnapshotAlwaysStartsNewCheckpoint() throws Exception {
    long testTimeout = 30_000;

    listenLog = new ListeningTestLogger(log);

    LogListener lsnr = LogListener.matches("Snapshot operation is scheduled on local node").times(1).build();

    listenLog.registerListener(lsnr);

    IgniteEx ignite = startGridsWithCache(1, 4096, key -> new Account(key, key), new CacheConfiguration<>(DEFAULT_CACHE_NAME));

    assertTrue("Test requires that only forced checkpoints are allowed.",
        ignite.configuration().getDataStorageConfiguration().getCheckpointFrequency() >= TimeUnit.DAYS.toMillis(365));

    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager)ignite.context().cache().context().database();

    // Ensure that the previous checkpoint has finished.
    dbMgr.getCheckpointer().currentProgress().futureFor(CheckpointState.FINISHED).get(testTimeout);

    CountDownLatch beforeCpEnter = new CountDownLatch(1);
    CountDownLatch beforeCpExit = new CountDownLatch(1);

    // Block the checkpointer on start.
    dbMgr.addCheckpointListener(new CheckpointListener() {
        @Override public void beforeCheckpointBegin(CheckpointListener.Context ctx) throws IgniteCheckedException {
            beforeCpEnter.countDown();

            U.await(beforeCpExit, testTimeout, TimeUnit.MILLISECONDS);
        }

        @Override public void onMarkCheckpointBegin(CheckpointListener.Context ctx) {
            // No-op.
        }

        @Override public void onCheckpointBegin(CheckpointListener.Context ctx) {
            // No-op.
        }
    });

    dbMgr.forceCheckpoint("snapshot-task-hang-test");

    beforeCpEnter.await(testTimeout, TimeUnit.MILLISECONDS);

    IgniteFuture<Void> snpFut = ignite.snapshot().createSnapshot(SNAPSHOT_NAME);

    // Wait until the snapshot task checkpoint listener is registered.
    assertTrue(GridTestUtils.waitForCondition(lsnr::check, testTimeout));

    // Unblock the checkpointer.
    beforeCpExit.countDown();

    // Make sure the snapshot has been taken.
    snpFut.get(testTimeout);
}
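The log-based synchronization used above, gathered into one place (a sketch that assumes the same listenLog, log and testTimeout fields as in the test):

// Register a matcher on the test logger, run the operation that should produce the
// message, then wait until it has been observed the expected number of times.
LogListener lsnr = LogListener.matches("Snapshot operation is scheduled on local node").times(1).build();

listenLog.registerListener(lsnr);

// ... trigger the operation under test (here, the snapshot) ...

assertTrue(GridTestUtils.waitForCondition(lsnr::check, testTimeout));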
use of org.apache.ignite.internal.processors.cache.persistence.checkpoint.CheckpointListener in project ignite by apache.
the class DurableBackgroundTasksProcessorSelfTest method testDontDeleteTaskIfItsRestart.
/**
 * Check that the task will not be deleted from the MetaStorage if it was restarted.
 *
 * @throws Exception If failed.
 */
@Test
public void testDontDeleteTaskIfItsRestart() throws Exception {
    IgniteEx n = startGrid(0);

    ObservingCheckpointListener observingCpLsnr = new ObservingCheckpointListener();

    dbMgr(n).addCheckpointListener(observingCpLsnr);

    n.cluster().state(ACTIVE);

    CheckpointWorkflow cpWorkflow = checkpointWorkflow(n);

    List<CheckpointListener> cpLs = cpWorkflow.getRelevantCheckpointListeners(dbMgr(n).checkpointedDataRegions());

    assertTrue(cpLs.contains(observingCpLsnr));
    assertTrue(cpLs.contains(durableBackgroundTask(n)));
    assertTrue(cpLs.indexOf(observingCpLsnr) < cpLs.indexOf(durableBackgroundTask(n)));

    SimpleTask simpleTask0 = new SimpleTask("t");

    IgniteInternalFuture<Void> taskFut = durableBackgroundTask(n).executeAsync(simpleTask0, true);

    simpleTask0.onExecFut.get(getTestTimeout());

    forceCheckpoint();

    dbMgr(n).enableCheckpoints(false).get(getTestTimeout());

    simpleTask0.taskFut.onDone(DurableBackgroundTaskResult.complete(null));

    taskFut.get(getTestTimeout());

    SimpleTask simpleTask1 = new SimpleTask("t");

    AtomicReference<IgniteInternalFuture<Void>> taskFutRef = new AtomicReference<>();

    observingCpLsnr.afterCheckpointEndConsumer =
        ctx -> taskFutRef.set(durableBackgroundTask(n).executeAsync(simpleTask1, true));

    dbMgr(n).enableCheckpoints(true).get(getTestTimeout());

    forceCheckpoint();

    assertNotNull(metaStorageOperation(n, ms -> ms.read(metaStorageKey(simpleTask0))));

    simpleTask1.onExecFut.get(getTestTimeout());

    simpleTask1.taskFut.onDone(DurableBackgroundTaskResult.complete(null));

    taskFutRef.get().get(getTestTimeout());

    forceCheckpoint();

    assertNull(metaStorageOperation(n, ms -> ms.read(metaStorageKey(simpleTask1))));
}
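The checkpoint gating pattern this test relies on, reduced to a sketch (assumes the dbMgr(n) and forceCheckpoint() helpers used above; the comments reflect an interpretation of the pattern rather than documented behavior):

// Pause checkpoints, perform work whose persistence should be deferred,
// then re-enable them and force a checkpoint so everything lands in one checkpoint.
dbMgr(n).enableCheckpoints(false).get(getTestTimeout());

// ... perform work while checkpoints are paused ...

dbMgr(n).enableCheckpoints(true).get(getTestTimeout());

forceCheckpoint();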