Use of org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl in project ignite by apache.
The class GridCacheDatabaseSharedManager, method createPageMemory.
/**
* {@inheritDoc}
*/
@Override
protected PageMemory createPageMemory(DirectMemoryProvider memProvider, DataStorageConfiguration memCfg, DataRegionConfiguration plcCfg, DataRegionMetricsImpl memMetrics, final boolean trackable) {
if (!plcCfg.isPersistenceEnabled())
return super.createPageMemory(memProvider, memCfg, plcCfg, memMetrics, trackable);
memMetrics.persistenceEnabled(true);
long cacheSize = plcCfg.getMaxSize();
// Checkpoint buffer size cannot be greater than the cache size; a larger value makes no sense.
long chpBufSize = checkpointBufferSize(plcCfg);
if (chpBufSize > cacheSize) {
U.quietAndInfo(log, "Configured checkpoint page buffer size is too big, setting to the max region size [size=" + U.readableSize(cacheSize, false) + ", memPlc=" + plcCfg.getName() + ']');
chpBufSize = cacheSize;
}
GridInClosure3X<Long, FullPageId, PageMemoryEx> changeTracker;
if (trackable)
changeTracker = new GridInClosure3X<Long, FullPageId, PageMemoryEx>() {
@Override
public void applyx(Long page, FullPageId fullId, PageMemoryEx pageMem) throws IgniteCheckedException {
if (trackable)
snapshotMgr.onChangeTrackerPage(page, fullId, pageMem);
}
};
else
changeTracker = null;
PageMemoryImpl pageMem = new PageMemoryImpl(memProvider, calculateFragmentSizes(memCfg.getConcurrencyLevel(), cacheSize, chpBufSize), cctx, memCfg.getPageSize(), (fullId, pageBuf, tag) -> {
// First of all, write page to disk.
storeMgr.write(fullId.groupId(), fullId.pageId(), pageBuf, tag);
// Only after the write can the page be copied into the snapshot.
snapshotMgr.flushDirtyPageHandler(fullId, pageBuf, tag);
AtomicInteger cntr = evictedPagesCntr;
if (cntr != null)
cntr.incrementAndGet();
}, changeTracker, this, memMetrics, resolveThrottlingPolicy(), this);
memMetrics.pageMemory(pageMem);
return pageMem;
}
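The clamping above only applies when persistence is enabled for the region. A minimal configuration sketch (not part of this class; the region name and sizes are illustrative) showing how a persistent data region with an explicit checkpoint page buffer size is declared, which is the value createPageMemory later clamps to the region's max size:

import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class PersistentRegionConfigSketch {
    /** Illustrative sizes: a 4 GB persistent region with a 1 GB checkpoint page buffer. */
    public static IgniteConfiguration configure() {
        DataRegionConfiguration regionCfg = new DataRegionConfiguration()
            // Hypothetical region name.
            .setName("persistent-region")
            .setPersistenceEnabled(true)
            .setMaxSize(4L * 1024 * 1024 * 1024)
            // If this were larger than the region's max size, createPageMemory() would clamp it.
            .setCheckpointPageBufferSize(1L * 1024 * 1024 * 1024);

        return new IgniteConfiguration().setDataStorageConfiguration(
            new DataStorageConfiguration().setDefaultDataRegionConfiguration(regionCfg));
    }
}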
Use of org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl in project ignite by apache.
The class IgnitePdsRecoveryAfterFileCorruptionTest, method generateWal.
/**
* @param mem Page memory.
* @param storeMgr Page store manager.
* @param wal Write-ahead log manager.
* @param cacheId Cache ID.
* @param pages Pages to fill and checkpoint.
* @throws Exception If failed.
*/
private void generateWal(final PageMemoryImpl mem, final IgnitePageStoreManager storeMgr, final IgniteWriteAheadLogManager wal, final int cacheId, FullPageId[] pages) throws Exception {
// Mark the start position.
CheckpointRecord cpRec = new CheckpointRecord(null);
WALPointer start = wal.log(cpRec);
wal.flush(start, false);
for (FullPageId fullId : pages) {
long page = mem.acquirePage(fullId.groupId(), fullId.pageId());
try {
long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page);
PageIO.setPageId(pageAddr, fullId.pageId());
try {
for (int j = PageIO.COMMON_HEADER_END; j < mem.realPageSize(fullId.groupId()); j += 4)
    PageUtils.putInt(pageAddr, j, j + (int) fullId.pageId());
} finally {
mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true);
}
} finally {
mem.releasePage(fullId.groupId(), fullId.pageId(), page);
}
}
Collection<FullPageId> pageIds = mem.beginCheckpoint(new GridFinishedFuture());
info("Acquired pages for checkpoint: " + pageIds.size());
try {
long begin = System.currentTimeMillis();
long cp = 0;
AtomicLong write = new AtomicLong();
PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> {
int groupId = fullPageId.groupId();
long pageId = fullPageId.pageId();
for (int j = PageIO.COMMON_HEADER_END; j < mem.realPageSize(groupId); j += 4)
    assertEquals(j + (int) pageId, buf.getInt(j));
buf.rewind();
long writeStart = System.nanoTime();
storeMgr.write(cacheId, pageId, buf, tag, true);
long writeEnd = System.nanoTime();
write.getAndAdd(writeEnd - writeStart);
};
ByteBuffer tmpBuf = ByteBuffer.allocate(mem.pageSize());
tmpBuf.order(ByteOrder.nativeOrder());
for (FullPageId fullId : pages) {
if (pageIds.contains(fullId)) {
long cpStart = System.nanoTime();
mem.checkpointWritePage(fullId, tmpBuf, pageStoreWriter, null);
long cpEnd = System.nanoTime();
cp += cpEnd - cpStart;
}
}
long syncStart = System.currentTimeMillis();
storeMgr.sync(cacheId, 0);
long end = System.currentTimeMillis();
info("Written pages in " + (end - begin) + "ms, copy took " + (cp / 1_000_000) + "ms, " + "write took " + (write.get() / 1_000_000) + "ms, sync took " + (end - syncStart) + "ms");
} finally {
info("Finishing checkpoint...");
mem.finishCheckpoint();
info("Finished checkpoint");
}
wal.flush(wal.log(new CheckpointRecord(null)), false);
for (FullPageId fullId : pages) {
long page = mem.acquirePage(fullId.groupId(), fullId.pageId());
try {
assertFalse("Page has a temp heap copy after the last checkpoint: [cacheId=" + fullId.groupId() + ", pageId=" + fullId.pageId() + "]", mem.hasTempCopy(page));
assertFalse("Page is dirty after the last checkpoint: [cacheId=" + fullId.groupId() + ", pageId=" + fullId.pageId() + "]", mem.isDirty(fullId.groupId(), fullId.pageId(), page));
} finally {
mem.releasePage(fullId.groupId(), fullId.pageId(), page);
}
}
}
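For reference, the pattern written above can be verified later under a read lock with the same PageMemoryImpl API. A minimal sketch (the helper name is ours, not part of the original test) mirroring the write path of generateWal:

/** Sketch: re-reads one page and checks the int pattern written by generateWal. */
private void verifyPage(PageMemoryImpl mem, FullPageId fullId) throws Exception {
    long page = mem.acquirePage(fullId.groupId(), fullId.pageId());

    try {
        long pageAddr = mem.readLock(fullId.groupId(), fullId.pageId(), page);

        try {
            // generateWal stores j + (int)pageId at every int-aligned offset after the common header.
            for (int j = PageIO.COMMON_HEADER_END; j < mem.realPageSize(fullId.groupId()); j += 4)
                assertEquals(j + (int) fullId.pageId(), PageUtils.getInt(pageAddr, j));
        }
        finally {
            mem.readUnlock(fullId.groupId(), fullId.pageId(), page);
        }
    }
    finally {
        mem.releasePage(fullId.groupId(), fullId.pageId(), page);
    }
}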
Use of org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl in project ignite by apache.
The class PageMemoryTracker, method start.
/**
* Start tracking pages.
*/
synchronized void start() {
if (!isEnabled() || started)
return;
pageSize = ctx.igniteConfiguration().getDataStorageConfiguration().getPageSize();
pageMemoryMock = mockPageMemory();
GridCacheSharedContext sharedCtx = gridCtx.cache().context();
// Initialize one memory region covering all data regions of the target Ignite node.
long maxMemorySize = 0;
for (DataRegion dataRegion : sharedCtx.database().dataRegions()) {
if (dataRegion.pageMemory() instanceof PageMemoryImpl)
maxMemorySize += dataRegion.config().getMaxSize();
}
long[] chunks = new long[] { maxMemorySize };
memoryProvider = new UnsafeMemoryProvider(log);
memoryProvider.initialize(chunks);
memoryRegion = memoryProvider.nextRegion();
GridUnsafe.setMemory(memoryRegion.address(), memoryRegion.size(), (byte) 0);
maxPages = (int) (maxMemorySize / pageSize);
pageSlots = new DirectMemoryPageSlot[maxPages];
freeSlotsCnt = maxPages;
tmpBuf1 = ByteBuffer.allocateDirect(pageSize);
tmpBuf2 = ByteBuffer.allocateDirect(pageSize);
if (cfg.isCheckPagesOnCheckpoint()) {
checkpointLsnr = new CheckpointListener() {
@Override
public void onMarkCheckpointBegin(Context ctx) throws IgniteCheckedException {
if (!checkPages(false, true))
throw new IgniteCheckedException("Page memory is inconsistent after applying WAL delta records.");
}
@Override
public void beforeCheckpointBegin(Context ctx) {
/* No-op. */
}
@Override
public void onCheckpointBegin(Context ctx) {
/* No-op. */
}
};
((GridCacheDatabaseSharedManager) gridCtx.cache().context().database()).addCheckpointListener(checkpointLsnr);
}
lastPageIdx = 0;
started = true;
log.info("PageMemory tracker started, " + U.readableSize(maxMemorySize, false) + " offheap memory allocated.");
}
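The method returns early unless the tracker is enabled. A sketch of enabling it for a test node, assuming the PageMemoryTrackerConfiguration plugin configuration with setEnabled and setCheckPagesOnCheckpoint setters (an assumption to verify against the Ignite version in use):

import org.apache.ignite.configuration.IgniteConfiguration;
import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryTrackerConfiguration;

// Assumed API: PageMemoryTrackerConfiguration and IgniteConfiguration.setPluginConfigurations(...).
IgniteConfiguration cfg = new IgniteConfiguration()
    .setPluginConfigurations(new PageMemoryTrackerConfiguration()
        .setEnabled(true)
        // Triggers the checkpoint listener registered in start() above.
        .setCheckPagesOnCheckpoint(true));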
Use of org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl in project ignite by apache.
The class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method runCheckpointing.
/**
* @param ig Ignite instance.
* @param mem Memory to use.
* @param storeMgr Store manager.
* @param wal Write-ahead log manager.
* @param cacheId Cache ID.
* @return Result map of random operations.
* @throws Exception If failure occurred.
*/
private IgniteBiTuple<Map<FullPageId, Integer>, WALPointer> runCheckpointing(final IgniteEx ig, final PageMemoryImpl mem, final IgnitePageStoreManager storeMgr, final IgniteWriteAheadLogManager wal, final int cacheId) throws Exception {
final ConcurrentMap<FullPageId, Integer> resMap = new ConcurrentHashMap<>();
final FullPageId[] pages = new FullPageId[TOTAL_PAGES];
Set<FullPageId> allocated = new HashSet<>();
IgniteCacheDatabaseSharedManager db = ig.context().cache().context().database();
PageIO pageIO = new DummyPageIO();
for (int i = 0; i < TOTAL_PAGES; i++) {
FullPageId fullId;
db.checkpointReadLock();
try {
fullId = new FullPageId(mem.allocatePage(cacheId, 0, PageIdAllocator.FLAG_DATA), cacheId);
initPage(mem, pageIO, fullId);
} finally {
db.checkpointReadUnlock();
}
resMap.put(fullId, -1);
pages[i] = fullId;
allocated.add(fullId);
}
final AtomicBoolean run = new AtomicBoolean(true);
// Simulate transaction lock.
final ReadWriteLock updLock = new ReentrantReadWriteLock();
// Mark the start position.
CheckpointRecord cpRec = new CheckpointRecord(null);
WALPointer start = wal.log(cpRec);
wal.flush(start, false);
IgniteInternalFuture<Long> updFut = GridTestUtils.runMultiThreadedAsync(new Callable<Object>() {
@Override
public Object call() throws Exception {
while (true) {
FullPageId fullId = pages[ThreadLocalRandom.current().nextInt(TOTAL_PAGES)];
updLock.readLock().lock();
try {
if (!run.get())
return null;
ig.context().cache().context().database().checkpointReadLock();
try {
long page = mem.acquirePage(fullId.groupId(), fullId.pageId());
try {
long pageAddr = mem.writeLock(fullId.groupId(), fullId.pageId(), page);
PageIO.setPageId(pageAddr, fullId.pageId());
try {
int state = resMap.get(fullId);
if (state != -1) {
if (VERBOSE)
info("Verify page [fullId=" + fullId + ", state=" + state + ", buf=" + pageAddr + ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) + ", page=" + U.hexLong(System.identityHashCode(page)) + ']');
for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++) {
assertEquals("Verify page failed [fullId=" + fullId + ", i=" + i + ", state=" + state + ", buf=" + pageAddr + ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) + ", page=" + U.hexLong(System.identityHashCode(page)) + ']', state & 0xFF, PageUtils.getByte(pageAddr, i) & 0xFF);
}
}
state = (state + 1) & 0xFF;
if (VERBOSE)
info("Write page [fullId=" + fullId + ", state=" + state + ", buf=" + pageAddr + ", bhc=" + U.hexLong(System.identityHashCode(pageAddr)) + ", page=" + U.hexLong(System.identityHashCode(page)) + ']');
for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++)
    PageUtils.putByte(pageAddr, i, (byte) state);
resMap.put(fullId, state);
} finally {
mem.writeUnlock(fullId.groupId(), fullId.pageId(), page, null, true);
}
} finally {
mem.releasePage(fullId.groupId(), fullId.pageId(), page);
}
} finally {
ig.context().cache().context().database().checkpointReadUnlock();
}
} finally {
updLock.readLock().unlock();
}
}
}
}, 8, "update-thread");
int checkpoints = 20;
while (checkpoints > 0) {
Map<FullPageId, Integer> snapshot = null;
Collection<FullPageId> pageIds;
updLock.writeLock().lock();
try {
snapshot = new HashMap<>(resMap);
pageIds = mem.beginCheckpoint(new GridFinishedFuture());
checkpoints--;
if (checkpoints == 0)
// No more writes should be done at this point.
run.set(false);
info("Acquired pages for checkpoint: " + pageIds.size());
} finally {
updLock.writeLock().unlock();
}
boolean ok = false;
try {
ByteBuffer tmpBuf = ByteBuffer.allocate(mem.pageSize());
tmpBuf.order(ByteOrder.nativeOrder());
long begin = System.currentTimeMillis();
long cp = 0;
long write = 0;
for (FullPageId fullId : pageIds) {
long cpStart = System.nanoTime();
Integer tag;
AtomicReference<Integer> tag0 = new AtomicReference<>();
PageStoreWriter pageStoreWriter = (fullPageId, buf, tagx) -> {
tag0.set(tagx);
};
while (true) {
mem.checkpointWritePage(fullId, tmpBuf, pageStoreWriter, null);
tag = tag0.get();
if (tag != null && tag == PageMemoryImpl.TRY_AGAIN_TAG)
continue;
break;
}
if (tag == null)
continue;
long cpEnd = System.nanoTime();
cp += cpEnd - cpStart;
Integer state = snapshot.get(fullId);
if (allocated.contains(fullId) && state != -1) {
tmpBuf.rewind();
Integer first = null;
for (int i = PageIO.COMMON_HEADER_END; i < mem.realPageSize(fullId.groupId()); i++) {
int val = tmpBuf.get(i) & 0xFF;
if (first == null)
first = val;
// Avoid string concat.
if (first != val)
assertEquals("Corrupted buffer at position [pageId=" + fullId + ", pos=" + i + ']', (int) first, val);
// Avoid string concat.
if (state != val)
assertEquals("Invalid value at position [pageId=" + fullId + ", pos=" + i + ']', (int) state, val);
}
}
tmpBuf.rewind();
long writeStart = System.nanoTime();
storeMgr.write(cacheId, fullId.pageId(), tmpBuf, tag, true);
long writeEnd = System.nanoTime();
write += writeEnd - writeStart;
tmpBuf.rewind();
}
long syncStart = System.currentTimeMillis();
storeMgr.sync(cacheId, 0);
long end = System.currentTimeMillis();
info("Written pages in " + (end - begin) + "ms, copy took " + (cp / 1_000_000) + "ms, " + "write took " + (write / 1_000_000) + "ms, sync took " + (end - syncStart) + "ms");
ok = true;
} finally {
info("Finishing checkpoint...");
mem.finishCheckpoint();
info("Finished checkpoint");
if (!ok) {
info("Cancelling updates...");
run.set(false);
updFut.get();
}
}
if (checkpoints != 0)
Thread.sleep(2_000);
}
info("checkpoints=" + checkpoints + ", done=" + updFut.isDone());
updFut.get();
assertEquals(0, mem.activePagesCount());
for (FullPageId fullId : pages) {
long page = mem.acquirePage(fullId.groupId(), fullId.pageId());
try {
assertFalse("Page has a temp heap copy after the last checkpoint: [cacheId=" + fullId.groupId() + ", pageId=" + fullId.pageId() + "]", mem.hasTempCopy(page));
assertFalse("Page is dirty after the last checkpoint: [cacheId=" + fullId.groupId() + ", pageId=" + fullId.pageId() + "]", mem.isDirty(fullId.groupId(), fullId.pageId(), page));
} finally {
mem.releasePage(fullId.groupId(), fullId.pageId(), page);
}
}
return F.t((Map<FullPageId, Integer>) resMap, start);
}
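The while (true) loop around checkpointWritePage retries whenever the writer reports PageMemoryImpl.TRY_AGAIN_TAG, meaning the page was busy at that moment; a null tag means the page no longer needs to be written. The same logic, restated as a small helper (the helper name is ours; the test keeps it inline):

/** Sketch: copies one checkpoint page into {@code tmpBuf}, returns its tag or {@code null} if there is nothing to write. */
private Integer copyPageForCheckpoint(PageMemoryImpl mem, FullPageId fullId, ByteBuffer tmpBuf)
    throws IgniteCheckedException {
    AtomicReference<Integer> tagRef = new AtomicReference<>();

    PageStoreWriter pageStoreWriter = (fullPageId, buf, tag) -> tagRef.set(tag);

    while (true) {
        mem.checkpointWritePage(fullId, tmpBuf, pageStoreWriter, null);

        Integer tag = tagRef.get();

        // TRY_AGAIN_TAG: the page was locked by another thread, retry; any other result is final.
        if (tag == null || tag != PageMemoryImpl.TRY_AGAIN_TAG)
            return tag;
    }
}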
Use of org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryImpl in project ignite by apache.
The class IgniteSequentialNodeCrashRecoveryTest, method captureDirtyPages.
/**
* @param g Ignite instance.
* @return Collection of dirty page identifiers.
* @throws IgniteCheckedException If failed.
*/
private Collection<FullPageId> captureDirtyPages(IgniteEx g) throws IgniteCheckedException {
GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) g.context().cache().context().database();
dbMgr.checkpointReadLock();
try {
// Move free list pages to offheap before capturing the dirty set.
for (CacheGroupContext group : g.context().cache().cacheGroups()) {
((GridCacheOffheapManager) group.offheap()).onMarkCheckpointBegin(new DummyCheckpointContext());
}
} finally {
dbMgr.checkpointReadUnlock();
}
// Capture a set of dirty pages.
PageMemoryImpl pageMem = (PageMemoryImpl) dbMgr.dataRegion("default").pageMemory();
return pageMem.dirtyPages();
}
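A hypothetical follow-up (not part of the original test): comparing two captures shows which of the originally dirty pages are still dirty, for example after a checkpoint has run.

// Hypothetical usage sketch.
Collection<FullPageId> before = captureDirtyPages(g);

// ... run a checkpoint or further updates by the test's usual means ...

Collection<FullPageId> after = captureDirtyPages(g);

Set<FullPageId> stillDirty = new HashSet<>(before);
stillDirty.retainAll(after);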