Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
In the class IgnitePdsReserveWalSegmentsTest, method testNotTruncateSegmentsForBinaryRecovery.
/**
 * Checks that segments required for binary recovery are not truncated.
 *
 * @throws Exception If failed.
 */
@Test
public void testNotTruncateSegmentsForBinaryRecovery() throws Exception {
    IgniteEx n = prepareGrid(1);

    IgniteWriteAheadLogManager wal = n.context().cache().context().wal();
    assertNotNull(wal);

    long resIdx = getReservedWalSegmentIndex(wal);
    assertTrue(resIdx > 3);

    WALPointer lastCheckpointPtr = lastCheckpointPointer(n);
    assertEquals(lastCheckpointPtr.index(), resIdx);

    wal.notchLastCheckpointPtr(new WALPointer(1, 0, 0));

    if (compactionEnabled(n))
        assertTrue(waitForCondition(() -> wal.lastCompactedSegment() >= 1, 10_000));

    int truncated = wal.truncate(lastCheckpointPtr);
    assertTrue("truncated: " + truncated, truncated >= 1);

    truncated = wal.truncate(lastCheckpointPtr);
    assertEquals(0, truncated);

    wal.notchLastCheckpointPtr(new WALPointer(2, 0, 0));

    if (compactionEnabled(n))
        assertTrue(waitForCondition(() -> wal.lastCompactedSegment() >= 2, 10_000));

    truncated = wal.truncate(lastCheckpointPtr);
    assertTrue("truncated: " + truncated, truncated >= 1);

    truncated = wal.truncate(lastCheckpointPtr);
    assertEquals(0, truncated);
}
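The truncation behaviour above depends on persistence and WAL compaction being enabled for the node returned by prepareGrid, a helper of this test class. Below is a minimal sketch of the kind of configuration such a test relies on, using only the public DataStorageConfiguration API; the class name and the concrete sizes are illustrative assumptions, not the values used by the test.

import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class WalTruncationConfigSketch {
    /** Builds a persistence-enabled configuration with WAL compaction turned on. */
    public static IgniteConfiguration config(String instanceName) {
        DataStorageConfiguration dsCfg = new DataStorageConfiguration()
            // Small segments so that several of them roll over quickly (illustrative value).
            .setWalSegmentSize(512 * 1024)
            // Without compaction, lastCompactedSegment() does not advance and the
            // waitForCondition branches in the test are skipped.
            .setWalCompactionEnabled(true)
            .setDefaultDataRegionConfiguration(
                new DataRegionConfiguration().setPersistenceEnabled(true));

        return new IgniteConfiguration()
            .setIgniteInstanceName(instanceName)
            .setDataStorageConfiguration(dsCfg);
    }
}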
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
In the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method testGetForInitialWrite.
/**
* @throws Exception if failed.
*/
@Test
public void testGetForInitialWrite() throws Exception {
    IgniteEx ig = startGrid(0);

    ig.cluster().active(true);

    GridCacheSharedContext<Object, Object> shared = ig.context().cache().context();

    int cacheId = shared.cache().cache(CACHE_NAME).context().cacheId();

    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) shared.database();

    // Disable integrated checkpoint thread.
    dbMgr.enableCheckpoints(false);

    PageMemory mem = shared.database().dataRegion(null).pageMemory();

    IgniteWriteAheadLogManager wal = shared.wal();

    WALPointer start = wal.log(new CheckpointRecord(null));

    final FullPageId[] initWrites = new FullPageId[10];

    ig.context().cache().context().database().checkpointReadLock();

    try {
        for (int i = 0; i < initWrites.length; i++)
            initWrites[i] = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId);

        // Check getForInitialWrite methods.
        for (FullPageId fullId : initWrites) {
            long page = mem.acquirePage(fullId.groupId(), fullId.pageId());

            try {
                long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page);

                try {
                    DataPageIO.VERSIONS.latest().initNewPage(pageAddr, fullId.pageId(), mem.realPageSize(fullId.groupId()), null);

                    for (int i = PageIO.COMMON_HEADER_END + DataPageIO.ITEMS_OFF; i < mem.pageSize(); i++)
                        PageUtils.putByte(pageAddr, i, (byte) 0xAB);

                    PageIO.printPage(pageAddr, mem.realPageSize(fullId.groupId()));
                }
                finally {
                    mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true);
                }
            }
            finally {
                mem.releasePage(fullId.groupId(), fullId.pageId(), page);
            }
        }

        wal.flush(null, false);
    }
    finally {
        ig.context().cache().context().database().checkpointReadUnlock();

        stopAllGrids(false);
    }

    ig = startGrid(0);

    ig.cluster().active(true);

    shared = ig.context().cache().context();
    dbMgr = (GridCacheDatabaseSharedManager) shared.database();

    dbMgr.enableCheckpoints(false);

    wal = shared.wal();

    try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
        it.next();

        for (FullPageId initialWrite : initWrites) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

            assertTrue(String.valueOf(tup.get2()), tup.get2() instanceof PageSnapshot);

            PageSnapshot snap = (PageSnapshot) tup.get2();

            FullPageId actual = snap.fullPageId();

            // There are extra tracking pages, skip them.
            if (TrackingPageIO.VERSIONS.latest().trackingPageFor(actual.pageId(), mem.pageSize()) == actual.pageId()) {
                tup = it.next();

                assertTrue(tup.get2() instanceof PageSnapshot);

                actual = ((PageSnapshot) tup.get2()).fullPageId();
            }

            assertEquals(initialWrite, actual);
        }
    }
}
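Every page mutation in the test follows the same acquire / write-lock / unlock / release discipline, taken under the database checkpoint read lock. Below is a minimal sketch of that idiom as a hypothetical helper; the method name and parameters are illustrative, and only the PageMemory calls already shown above are used.

import org.apache.ignite.internal.pagemem.FullPageId;
import org.apache.ignite.internal.pagemem.PageMemory;
import org.apache.ignite.internal.pagemem.PageUtils;

class PagePatchSketch {
    /** Writes a single byte into a page, keeping the lock/release order used by the test. */
    static void writeByte(PageMemory mem, FullPageId fullId, int off, byte val) throws Exception {
        long page = mem.acquirePage(fullId.groupId(), fullId.pageId());

        try {
            long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page);

            try {
                PageUtils.putByte(pageAddr, off, val);
            }
            finally {
                // The trailing 'true' marks the page dirty so the change is tracked
                // for the next checkpoint / full page WAL record.
                mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true);
            }
        }
        finally {
            mem.releasePage(fullId.groupId(), fullId.pageId(), page);
        }
    }
}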
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
In the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method testCheckpointSimulationMultiThreaded.
/**
* @throws Exception if failed.
*/
@Test
public void testCheckpointSimulationMultiThreaded() throws Exception {
    IgniteEx ig = startGrid(0);

    ig.cluster().active(true);

    GridCacheSharedContext<Object, Object> shared = ig.context().cache().context();
    GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) shared.database();
    IgnitePageStoreManager pageStore = shared.pageStore();

    U.sleep(1_000);

    // Disable integrated checkpoint thread.
    dbMgr.enableCheckpoints(false).get();

    // Must put something in partition 0 in order to initialize meta page.
    // Otherwise we will violate page store integrity rules.
    ig.cache(CACHE_NAME).put(0, 0);

    PageMemory mem = shared.database().dataRegion(null).pageMemory();

    IgniteBiTuple<Map<FullPageId, Integer>, WALPointer> res;

    try {
        res = runCheckpointing(ig, (PageMemoryImpl) mem, pageStore, shared.wal(),
            shared.cache().cache(CACHE_NAME).context().cacheId());
    }
    catch (Throwable th) {
        log().error("Error while running checkpointing", th);

        throw th;
    }
    finally {
        dbMgr.enableCheckpoints(true).get();

        stopAllGrids(false);
    }

    ig = startGrid(0);

    ig.cluster().active(true);

    shared = ig.context().cache().context();
    dbMgr = (GridCacheDatabaseSharedManager) shared.database();

    dbMgr.enableCheckpoints(false).get();

    mem = shared.database().dataRegion(null).pageMemory();

    verifyReads(ig.context(), res.get1(), mem, res.get2(), shared.wal());
}
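runCheckpointing and verifyReads are helpers of this test class: the map in res records the last byte value written per page, and the WALPointer marks where replay should start after restart. Below is a sketch of the general replay idiom they build on, collecting the newest PageSnapshot payload per page. It assumes ignite-core internals on the classpath and that WALIterator is the iterator type returned by wal.replay; note that the stored payloads may still be compressed, which the verifyReads example further down handles.

import java.util.HashMap;
import java.util.Map;
import org.apache.ignite.internal.pagemem.FullPageId;
import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
import org.apache.ignite.internal.pagemem.wal.WALIterator;
import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot;
import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer;
import org.apache.ignite.lang.IgniteBiTuple;

class LatestPageStateSketch {
    /** Replays the WAL from the given pointer and keeps the newest page snapshot per page. */
    static Map<FullPageId, byte[]> latestPages(IgniteWriteAheadLogManager wal, WALPointer start) throws Exception {
        Map<FullPageId, byte[]> latest = new HashMap<>();

        try (WALIterator it = wal.replay(start)) {
            for (IgniteBiTuple<WALPointer, WALRecord> tup : it) {
                if (tup.get2() instanceof PageSnapshot) {
                    PageSnapshot snap = (PageSnapshot) tup.get2();

                    // Records come back in WAL order, so later puts overwrite earlier
                    // ones and the map ends up holding the most recent snapshot.
                    latest.put(snap.fullPageId(), snap.pageData());
                }
            }
        }

        return latest;
    }
}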
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
In the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method checkDataWalEntries.
/**
 * @param mvcc Whether to check MVCC data entries.
 * @throws Exception if failed.
 */
private void checkDataWalEntries(boolean mvcc) throws Exception {
    IgniteEx ig = startGrid(0);

    ig.cluster().active(true);

    GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();
    GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager) sharedCtx.database();
    IgniteWriteAheadLogManager wal = sharedCtx.wal();

    assertTrue(wal.isAlwaysWriteFullPages());

    db.enableCheckpoints(false).get();

    final int cnt = 10;

    List<DataEntry> entries = new ArrayList<>(cnt);

    for (int i = 0; i < cnt; i++) {
        GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;

        KeyCacheObject key = cctx.toCacheKeyObject(i);

        CacheObject val = null;

        if (op != GridCacheOperation.DELETE)
            val = cctx.toCacheObject("value-" + i);

        entries.add(mvcc
            ? new MvccDataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
                cctx.affinity().partition(i), i, new MvccVersionImpl(1000L, 10L, i + 1))
            : new DataEntry(cctx.cacheId(), key, val, op, null, cctx.cache().nextVersion(), 0L,
                cctx.affinity().partition(i), i, DataEntry.EMPTY_FLAGS));
    }

    UUID cpId = UUID.randomUUID();

    WALPointer start = wal.log(new CheckpointRecord(cpId, null));

    wal.flush(start, false);

    for (DataEntry entry : entries)
        wal.log(mvcc ? new MvccDataRecord((MvccDataEntry) entry) : new DataRecord(entry));

    // Data will not be written to the page store.
    stopAllGrids();

    ig = startGrid(0);

    ig.cluster().active(true);

    sharedCtx = ig.context().cache().context();
    cctx = sharedCtx.cache().cache(mvcc ? MVCC_CACHE_NAME : CACHE_NAME).context();

    db = (GridCacheDatabaseSharedManager) sharedCtx.database();
    wal = sharedCtx.wal();

    db.enableCheckpoints(false).get();

    try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
        IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();

        assert cpRecordTup.get2() instanceof CheckpointRecord;

        assertEquals(start, cpRecordTup.get1());

        CheckpointRecord cpRec = (CheckpointRecord) cpRecordTup.get2();

        assertEquals(cpId, cpRec.checkpointId());
        assertNull(cpRec.checkpointMark());
        assertFalse(cpRec.end());

        int idx = 0;

        CacheObjectContext coctx = cctx.cacheObjectContext();

        while (idx < entries.size()) {
            IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();

            if (!mvcc)
                assert dataRecTup.get2() instanceof DataRecord;
            else
                assert dataRecTup.get2() instanceof MvccDataRecord;

            DataRecord dataRec = (DataRecord) dataRecTup.get2();

            DataEntry entry = entries.get(idx);

            assertEquals(1, dataRec.entryCount());

            DataEntry readEntry = dataRec.get(0);

            assertEquals(entry.cacheId(), readEntry.cacheId());
            assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
            assertEquals(entry.op(), readEntry.op());

            if (entry.op() == GridCacheOperation.UPDATE)
                assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
            else
                assertNull(entry.value());

            assertEquals(entry.writeVersion(), readEntry.writeVersion());
            assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
            assertEquals(entry.partitionCounter(), readEntry.partitionCounter());

            if (mvcc) {
                assert entry instanceof MvccDataEntry;
                assert readEntry instanceof MvccDataEntry;

                assertEquals(((MvccDataEntry) entry).mvccVer(), ((MvccDataEntry) readEntry).mvccVer());
            }

            idx++;
        }
    }
}
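The CheckpointRecord logged at the start plays no checkpointing role here; it is only a durable marker whose WALPointer is later handed to wal.replay. Below is a minimal sketch of that log-then-flush pattern in isolation, as a hypothetical helper assuming a started persistent IgniteEx node and ignite-core internals on the classpath.

import java.util.UUID;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
import org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord;
import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer;

class WalMarkerSketch {
    /** Logs a checkpoint record used purely as a replay anchor and makes it durable. */
    static WALPointer logReplayAnchor(IgniteEx ig) throws Exception {
        IgniteWriteAheadLogManager wal = ig.context().cache().context().wal();

        // log() returns the pointer of the record that was just appended.
        WALPointer ptr = wal.log(new CheckpointRecord(UUID.randomUUID(), null));

        // flush(ptr, false) ensures everything up to and including ptr has been
        // written to the WAL, so the marker survives a node restart.
        wal.flush(ptr, false);

        return ptr;
    }
}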
Use of org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer in project ignite by apache.
In the class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method verifyReads.
/**
 * @param ctx Kernal context.
 * @param res Result map to verify.
 * @param mem Memory.
 * @param start WAL pointer to replay from.
 * @param wal WAL manager.
 * @throws Exception If failed.
 */
private void verifyReads(GridKernalContext ctx, Map<FullPageId, Integer> res, PageMemory mem, WALPointer start,
    IgniteWriteAheadLogManager wal) throws Exception {
    Map<FullPageId, byte[]> replay = new HashMap<>();

    ByteBuffer buf = ByteBuffer.allocateDirect(mem.pageSize()).order(ByteOrder.nativeOrder());

    try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
        IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

        assertTrue("Invalid record: " + tup, tup.get2() instanceof CheckpointRecord);

        CheckpointRecord cpRec = (CheckpointRecord) tup.get2();

        while (it.hasNext()) {
            tup = it.next();

            WALRecord rec = tup.get2();

            if (rec instanceof CheckpointRecord) {
                CheckpointRecord end = (CheckpointRecord) rec;

                // Found the finish mark.
                if (end.checkpointId().equals(cpRec.checkpointId()) && end.end())
                    break;
            }
            else if (rec instanceof PageSnapshot) {
                PageSnapshot page = (PageSnapshot) rec;

                int realPageSize = mem.realPageSize(page.groupId());

                byte[] pageData = page.pageData();

                if (page.pageDataSize() < realPageSize) {
                    buf.clear();
                    buf.put(pageData).flip();

                    ctx.compress().decompressPage(buf, realPageSize);

                    pageData = new byte[buf.limit()];

                    buf.get(pageData);
                }

                replay.put(page.fullPageId(), pageData);
            }
        }
    }

    // Check read-through from the file store.
    for (Map.Entry<FullPageId, Integer> entry : res.entrySet()) {
        FullPageId fullId = entry.getKey();
        int state = entry.getValue();

        if (state == -1) {
            info("Page was never written: " + fullId);

            continue;
        }

        byte[] walData = replay.get(fullId);

        assertNotNull("Missing WAL record for a written page: " + fullId, walData);

        long page = mem.acquirePage(fullId.groupId(), fullId.pageId());

        try {
            long pageAddr = mem.readLock(fullId.groupId(), fullId.pageId(), page);

            try {
                for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++) {
                    int expState = state & 0xFF;
                    int pageState = PageUtils.getByte(pageAddr, i) & 0xFF;
                    int walState = walData[i] & 0xFF;

                    if (expState != pageState)
                        assertEquals("Invalid state [pageId=" + fullId + ", pos=" + i + ']', expState, pageState);

                    if (expState != walState)
                        assertEquals("Invalid WAL state [pageId=" + fullId + ", pos=" + i + ']', expState, walState);
                }
            }
            finally {
                mem.readUnlock(fullId.groupId(), fullId.pageId(), page);
            }
        }
        finally {
            mem.releasePage(fullId.groupId(), fullId.pageId(), page);
        }
    }
}
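The pageDataSize() < realPageSize branch above handles WAL page compression: the snapshot payload has to be inflated back to the real page size before it can be compared byte for byte. The same handling is extracted below into a hypothetical helper; the class name and signature are illustrative, and only calls already shown above are used.

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import org.apache.ignite.internal.GridKernalContext;
import org.apache.ignite.internal.pagemem.wal.record.PageSnapshot;

class PageSnapshotBytesSketch {
    /** Returns the full page bytes of a snapshot, decompressing the payload if needed. */
    static byte[] pageBytes(GridKernalContext ctx, PageSnapshot snap, int realPageSize) throws Exception {
        byte[] data = snap.pageData();

        if (snap.pageDataSize() < realPageSize) {
            // The snapshot was written compressed; inflate it into a direct buffer.
            ByteBuffer buf = ByteBuffer.allocateDirect(realPageSize).order(ByteOrder.nativeOrder());

            buf.put(data).flip();

            ctx.compress().decompressPage(buf, realPageSize);

            data = new byte[buf.limit()];
            buf.get(data);
        }

        return data;
    }
}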