use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class GridCacheProcessor method onExchangeDone.
/**
* Callback invoked when the first exchange future for a dynamic cache is completed.
*
* @param cacheStartVer Started caches version to create proxies for.
* @param exchActions Change requests.
* @param err Error.
*/
@SuppressWarnings("unchecked")
public void onExchangeDone(AffinityTopologyVersion cacheStartVer, @Nullable ExchangeActions exchActions, @Nullable Throwable err) {
initCacheProxies(cacheStartVer, err);
if (exchActions == null)
return;
if (exchActions.systemCachesStarting() && exchActions.stateChangeRequest() == null) {
ctx.dataStructures().restoreStructuresState(ctx);
ctx.service().updateUtilityCache();
}
if (err == null) {
// Force checkpoint if there is any cache stop request
if (exchActions.cacheStopRequests().size() > 0) {
try {
sharedCtx.database().waitForCheckpoint("caches stop");
} catch (IgniteCheckedException e) {
U.error(log, "Failed to wait for checkpoint finish during cache stop.", e);
}
}
for (ExchangeActions.CacheActionData action : exchActions.cacheStopRequests()) {
CacheGroupContext gctx = cacheGrps.get(action.descriptor().groupId());
// Cancel all operations blocking the gateway.
if (gctx != null) {
final String msg = "Failed to wait for topology update, cache group is stopping.";
// If a snapshot operation is in progress, we must throw CacheStoppedException
// for correct cache proxy restart. For more details see
// IgniteCacheProxy.cacheException().
gctx.affinity().cancelFutures(new CacheStoppedException(msg));
}
stopGateway(action.request());
sharedCtx.database().checkpointReadLock();
try {
prepareCacheStop(action.request().cacheName(), action.request().destroy());
} finally {
sharedCtx.database().checkpointReadUnlock();
}
}
sharedCtx.database().checkpointReadLock();
try {
// Do not invoke checkpoint listeners for groups that are going to be destroyed, to prevent metadata corruption.
for (ExchangeActions.CacheGroupActionData action : exchActions.cacheGroupsToStop()) {
Integer groupId = action.descriptor().groupId();
CacheGroupContext grp = cacheGrps.get(groupId);
if (grp != null && grp.persistenceEnabled() && sharedCtx.database() instanceof GridCacheDatabaseSharedManager) {
GridCacheDatabaseSharedManager mngr = (GridCacheDatabaseSharedManager) sharedCtx.database();
mngr.removeCheckpointListener((DbCheckpointListener) grp.offheap());
}
}
} finally {
sharedCtx.database().checkpointReadUnlock();
}
List<IgniteBiTuple<CacheGroupContext, Boolean>> stoppedGroups = new ArrayList<>();
for (ExchangeActions.CacheGroupActionData action : exchActions.cacheGroupsToStop()) {
Integer groupId = action.descriptor().groupId();
if (cacheGrps.containsKey(groupId)) {
stoppedGroups.add(F.t(cacheGrps.get(groupId), action.destroy()));
stopCacheGroup(groupId);
}
}
if (!sharedCtx.kernalContext().clientNode())
sharedCtx.database().onCacheGroupsStopped(stoppedGroups);
if (exchActions.deactivate())
sharedCtx.deactivate();
}
}
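The stop path above leans on the checkpoint read lock exposed by the database manager: stopping a cache and detaching a checkpoint listener both run between checkpointReadLock() and checkpointReadUnlock(), so they cannot interleave with a checkpoint that is writing pages. A minimal sketch of that lock discipline, using only the calls already visible in the method; the helper name underCheckpointReadLock and its Runnable parameter are ours, not Ignite's, and IgniteCacheDatabaseSharedManager here stands for whatever sharedCtx.database() returns.
// Hypothetical helper showing the pattern used twice in onExchangeDone above.
static void underCheckpointReadLock(IgniteCacheDatabaseSharedManager db, Runnable action) {
    db.checkpointReadLock();

    try {
        // E.g. prepareCacheStop(...) or removeCheckpointListener(...).
        action.run();
    }
    finally {
        // Always released, even if the action throws.
        db.checkpointReadUnlock();
    }
}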
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class GridDhtPartitionTopologyImpl method initPartitions0.
/**
* @param affVer Affinity version to use.
* @param exchFut Exchange future.
* @param updateSeq Update sequence.
* @return {@code True} if partitions must be refreshed.
*/
private boolean initPartitions0(AffinityTopologyVersion affVer, GridDhtPartitionsExchangeFuture exchFut, long updateSeq) {
List<List<ClusterNode>> aff = grp.affinity().readyAssignments(affVer);
boolean needRefresh = false;
if (grp.affinityNode()) {
ClusterNode loc = ctx.localNode();
ClusterNode oldest = discoCache.oldestAliveServerNode();
GridDhtPartitionExchangeId exchId = exchFut.exchangeId();
assert grp.affinity().lastVersion().equals(affVer) : "Invalid affinity [topVer=" + grp.affinity().lastVersion() + ", grp=" + grp.cacheOrGroupName() + ", affVer=" + affVer + ", fut=" + exchFut + ']';
int num = grp.affinity().partitions();
if (grp.rebalanceEnabled()) {
boolean added = exchFut.cacheGroupAddedOnExchange(grp.groupId(), grp.receivedFrom());
boolean first = added || (loc.equals(oldest) && loc.id().equals(exchId.nodeId()) && exchId.isJoined());
if (first) {
assert exchId.isJoined() || added;
for (int p = 0; p < num; p++) {
if (localNode(p, aff) || initLocalPartition(p, discoCache)) {
GridDhtLocalPartition locPart = createPartition(p);
if (grp.persistenceEnabled()) {
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) grp.shared().database();
locPart.restoreState(db.readPartitionState(grp, locPart.id()));
} else {
boolean owned = locPart.own();
assert owned : "Failed to own partition for oldest node [grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']';
if (log.isDebugEnabled())
log.debug("Owned partition for oldest node [grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']');
}
needRefresh = true;
updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
}
}
} else
createPartitions(affVer, aff, updateSeq);
} else {
// Rebalancing is disabled: simply clear out the partitions this node is not responsible for.
for (int p = 0; p < num; p++) {
GridDhtLocalPartition locPart = localPartition0(p, affVer, false, true, false);
boolean belongs = localNode(p, aff);
if (locPart != null) {
if (!belongs) {
GridDhtPartitionState state = locPart.state();
if (state.active()) {
locPart.rent(false);
updateSeq = updateLocal(p, locPart.state(), updateSeq, affVer);
if (log.isDebugEnabled()) {
log.debug("Evicting partition with rebalancing disabled (it does not belong to " + "affinity) [grp=" + grp.cacheOrGroupName() + ", part=" + locPart + ']');
}
}
} else
locPart.own();
} else if (belongs) {
locPart = createPartition(p);
locPart.own();
updateLocal(p, locPart.state(), updateSeq, affVer);
}
}
}
}
updateRebalanceVersion(aff);
return needRefresh;
}
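The interesting branch above is the persistent one: when the cache group is persistent, a freshly created partition takes its state from the partition meta page via GridCacheDatabaseSharedManager.readPartitionState(), and only an in-memory partition is owned outright. A condensed sketch of that decision using the same calls as the method; the standalone helper and its name initFreshPartition are ours.
// Hypothetical helper mirroring the first-exchange branch of initPartitions0.
private void initFreshPartition(CacheGroupContext grp, GridDhtLocalPartition locPart) {
    if (grp.persistenceEnabled()) {
        // With persistence, the state recorded in the partition meta page wins.
        GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) grp.shared().database();

        locPart.restoreState(db.readPartitionState(grp, locPart.id()));
    }
    else {
        // In-memory group: the node that creates the partition owns it right away.
        boolean owned = locPart.own();

        assert owned : "Failed to own partition [part=" + locPart + ']';
    }
}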
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest method testDataWalEntries.
/**
* @throws Exception if failed.
*/
public void testDataWalEntries() throws Exception {
IgniteEx ig = startGrid(0);
ig.active(true);
GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(cacheName).context();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
IgniteWriteAheadLogManager wal = sharedCtx.wal();
assertTrue(wal.isAlwaysWriteFullPages());
db.enableCheckpoints(false).get();
final int cnt = 10;
List<DataEntry> entries = new ArrayList<>(cnt);
for (int i = 0; i < cnt; i++) {
GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;
KeyCacheObject key = cctx.toCacheKeyObject(i);
CacheObject val = null;
if (op != GridCacheOperation.DELETE)
val = cctx.toCacheObject("value-" + i);
entries.add(new DataEntry(cctx.cacheId(), key, val, op, null, cctx.versions().next(), 0L, cctx.affinity().partition(i), i));
}
UUID cpId = UUID.randomUUID();
WALPointer start = wal.log(new CheckpointRecord(cpId, null));
wal.fsync(start);
for (DataEntry entry : entries) wal.log(new DataRecord(entry));
// Data will not be written to the page store.
stopAllGrids();
ig = startGrid(0);
ig.active(true);
sharedCtx = ig.context().cache().context();
cctx = sharedCtx.cache().cache(cacheName).context();
db = (GridCacheDatabaseSharedManager) sharedCtx.database();
wal = sharedCtx.wal();
db.enableCheckpoints(false).get();
try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();
assert cpRecordTup.get2() instanceof CheckpointRecord;
assertEquals(start, cpRecordTup.get1());
CheckpointRecord cpRec = (CheckpointRecord) cpRecordTup.get2();
assertEquals(cpId, cpRec.checkpointId());
assertNull(cpRec.checkpointMark());
assertFalse(cpRec.end());
int idx = 0;
CacheObjectContext coctx = cctx.cacheObjectContext();
while (idx < entries.size()) {
IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();
assert dataRecTup.get2() instanceof DataRecord;
DataRecord dataRec = (DataRecord) dataRecTup.get2();
DataEntry entry = entries.get(idx);
assertEquals(1, dataRec.writeEntries().size());
DataEntry readEntry = dataRec.writeEntries().get(0);
assertEquals(entry.cacheId(), readEntry.cacheId());
assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
assertEquals(entry.op(), readEntry.op());
if (entry.op() == GridCacheOperation.UPDATE)
assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
else
assertNull(readEntry.value());
assertEquals(entry.writeVersion(), readEntry.writeVersion());
assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
assertEquals(entry.partitionCounter(), readEntry.partitionCounter());
idx++;
}
}
}
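Underneath the assertions, the test is the basic WAL round-trip: log a marker record, fsync it, append DataRecords, restart, and replay from the saved pointer to compare what comes back. A stripped-down sketch of that cycle, reusing only the IgniteWriteAheadLogManager calls the test itself makes; the method name replayFrom is ours.
// Hypothetical sketch of the log / fsync / replay cycle exercised by testDataWalEntries.
private static void replayFrom(IgniteWriteAheadLogManager wal) throws IgniteCheckedException {
    // Remember where we started so replay can be scoped to "our" records.
    WALPointer start = wal.log(new CheckpointRecord(UUID.randomUUID(), null));

    // Make everything up to the marker durable before relying on it.
    wal.fsync(start);

    // Replay starts at the marker, so the CheckpointRecord itself comes back first.
    try (WALIterator it = wal.replay(start)) {
        while (it.hasNext()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
            // tup.get1() is the record's position, tup.get2() the deserialized WALRecord.
        }
    }
}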
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest method testPageWalEntries.
/**
* @throws Exception if failed.
*/
public void testPageWalEntries() throws Exception {
IgniteEx ig = startGrid(0);
ig.active(true);
GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
int cacheId = sharedCtx.cache().cache(cacheName).context().cacheId();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
PageMemory pageMem = sharedCtx.database().dataRegion(null).pageMemory();
IgniteWriteAheadLogManager wal = sharedCtx.wal();
db.enableCheckpoints(false).get();
int pageCnt = 100;
List<FullPageId> pageIds = new ArrayList<>();
for (int i = 0; i < pageCnt; i++) {
db.checkpointReadLock();
try {
pageIds.add(new FullPageId(pageMem.allocatePage(cacheId, PageIdAllocator.INDEX_PARTITION, PageIdAllocator.FLAG_IDX), cacheId));
} finally {
db.checkpointReadUnlock();
}
}
UUID cpId = UUID.randomUUID();
WALPointer start = wal.log(new CheckpointRecord(cpId, null));
wal.fsync(start);
ig.context().cache().context().database().checkpointReadLock();
try {
for (FullPageId pageId : pageIds) writePageData(pageId, pageMem);
} finally {
ig.context().cache().context().database().checkpointReadUnlock();
}
// Data will not be written to the page store.
stopAllGrids();
ig = startGrid(0);
ig.active(true);
sharedCtx = ig.context().cache().context();
db = (GridCacheDatabaseSharedManager) sharedCtx.database();
wal = sharedCtx.wal();
db.enableCheckpoints(false);
try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
assert tup.get2() instanceof CheckpointRecord : tup.get2();
assertEquals(start, tup.get1());
CheckpointRecord cpRec = (CheckpointRecord) tup.get2();
assertEquals(cpId, cpRec.checkpointId());
assertNull(cpRec.checkpointMark());
assertFalse(cpRec.end());
int idx = 0;
while (idx < pageIds.size()) {
tup = it.next();
assert tup.get2() instanceof PageSnapshot : tup.get2().getClass();
PageSnapshot snap = (PageSnapshot) tup.get2();
// There are extra tracking pages; skip them.
if (TrackingPageIO.VERSIONS.latest().trackingPageFor(snap.fullPageId().pageId(), pageMem.pageSize()) == snap.fullPageId().pageId()) {
tup = it.next();
assertTrue(tup.get2() instanceof PageSnapshot);
snap = (PageSnapshot) tup.get2();
}
assertEquals(pageIds.get(idx), snap.fullPageId());
idx++;
}
}
}
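One subtle point in the replay loop is the tracking-page filter: the durable memory interleaves its own tracking pages with the test's allocations, and a PageSnapshot whose page id maps onto itself via TrackingPageIO.trackingPageFor() is such a page and gets skipped. The inline check could be pulled into a tiny predicate; the name isTrackingPage is ours, the call is the one from the test.
// Hypothetical predicate wrapping the tracking-page check from testPageWalEntries:
// a page is a tracking page iff trackingPageFor() maps its id onto itself.
private static boolean isTrackingPage(long pageId, int pageSize) {
    return TrackingPageIO.VERSIONS.latest().trackingPageFor(pageId, pageSize) == pageId;
}
With that helper the loop body reads as: if isTrackingPage(snap.fullPageId().pageId(), pageMem.pageSize()) holds, advance to the next record before comparing page ids.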
use of org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager in project ignite by apache.
the class IgniteWalRecoveryTest method testTxRecordsConsistency.
/**
* Test that all DataRecord WAL records are within transaction boundaries - PREPARED and COMMITTED markers.
*
* @throws Exception If failed.
*/
public void testTxRecordsConsistency() throws Exception {
System.setProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS, "true");
IgniteEx ignite = (IgniteEx) startGrids(3);
ignite.active(true);
try {
final String cacheName = "transactional";
CacheConfiguration<Object, Object> cacheConfiguration = new CacheConfiguration<>(cacheName).setAtomicityMode(CacheAtomicityMode.TRANSACTIONAL).setAffinity(new RendezvousAffinityFunction(false, 32)).setCacheMode(CacheMode.PARTITIONED).setRebalanceMode(CacheRebalanceMode.SYNC).setWriteSynchronizationMode(CacheWriteSynchronizationMode.FULL_SYNC).setBackups(0);
ignite.createCache(cacheConfiguration);
IgniteCache<Object, Object> cache = ignite.cache(cacheName);
GridCacheSharedContext<Object, Object> sharedCtx = ignite.context().cache().context();
GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
db.waitForCheckpoint("test");
db.enableCheckpoints(false).get();
// Log something to know where to start.
WALPointer startPtr = sharedCtx.wal().log(new MemoryRecoveryRecord(U.currentTimeMillis()));
final int transactions = 100;
final int operationsPerTransaction = 40;
Random random = new Random();
for (int t = 1; t <= transactions; t++) {
Transaction tx = ignite.transactions().txStart(TransactionConcurrency.OPTIMISTIC, TransactionIsolation.READ_COMMITTED);
for (int op = 0; op < operationsPerTransaction; op++) {
int key = random.nextInt(1000) + 1;
Object value;
if (random.nextBoolean())
value = randomString(random) + key;
else
value = new BigObject(key);
cache.put(key, value);
}
if (random.nextBoolean()) {
tx.commit();
} else {
tx.rollback();
}
if (t % 50 == 0)
log.info("Finished transaction " + t);
}
Set<GridCacheVersion> activeTransactions = new HashSet<>();
// Check that all DataRecords are within PREPARED and COMMITTED tx records.
try (WALIterator it = sharedCtx.wal().replay(startPtr)) {
while (it.hasNext()) {
IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
WALRecord rec = tup.get2();
if (rec instanceof TxRecord) {
TxRecord txRecord = (TxRecord) rec;
GridCacheVersion txId = txRecord.nearXidVersion();
switch(txRecord.state()) {
case PREPARED:
assert !activeTransactions.contains(txId) : "Transaction is already present " + txRecord;
activeTransactions.add(txId);
break;
case COMMITTED:
assert activeTransactions.contains(txId) : "No PREPARE marker for transaction " + txRecord;
activeTransactions.remove(txId);
break;
case ROLLED_BACK:
activeTransactions.remove(txId);
break;
default:
throw new IllegalStateException("Unknown Tx state of record " + txRecord);
}
} else if (rec instanceof DataRecord) {
DataRecord dataRecord = (DataRecord) rec;
for (DataEntry entry : dataRecord.writeEntries()) {
GridCacheVersion txId = entry.nearXidVersion();
assert activeTransactions.contains(txId) : "No transaction for entry " + entry;
}
}
}
}
} finally {
System.clearProperty(IgniteSystemProperties.IGNITE_WAL_LOG_TX_RECORDS);
stopAllGrids();
}
}
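The heart of the test is a small state machine keyed by near transaction version: PREPARED adds the version to the active set, COMMITTED and ROLLED_BACK remove it, and every DataEntry replayed in between must reference a currently active version. A compact sketch of that scan as a standalone method; the name checkTxBoundaries is ours, and the COMMITTED precondition assert from the test is dropped for brevity.
// Hypothetical condensed form of the WAL scan in testTxRecordsConsistency.
private static void checkTxBoundaries(WALIterator it) {
    Set<GridCacheVersion> active = new HashSet<>();

    while (it.hasNext()) {
        WALRecord rec = it.next().get2();

        if (rec instanceof TxRecord) {
            TxRecord txRec = (TxRecord) rec;
            GridCacheVersion txId = txRec.nearXidVersion();

            switch (txRec.state()) {
                case PREPARED:
                    // Data records may now legally reference this transaction.
                    active.add(txId);

                    break;

                case COMMITTED:
                case ROLLED_BACK:
                    // No further data records are expected for this transaction.
                    active.remove(txId);

                    break;

                default:
                    throw new IllegalStateException("Unexpected tx state: " + txRec);
            }
        }
        else if (rec instanceof DataRecord) {
            // Every write entry must fall inside some PREPARED..COMMITTED window.
            for (DataEntry entry : ((DataRecord) rec).writeEntries())
                assert active.contains(entry.nearXidVersion()) : "No transaction for entry " + entry;
        }
    }
}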