Example 1 with PageIO

use of org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO in project ignite by apache.

the class PagesList method takeEmptyPage.

/**
 * @param bucket Bucket index.
 * @param initIoVers Optional IO to initialize page.
 * @return Removed page ID.
 * @throws IgniteCheckedException If failed.
 */
protected final long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers) throws IgniteCheckedException {
    for (int lockAttempt = 0; ; ) {
        Stripe stripe = getPageForTake(bucket);
        if (stripe == null)
            return 0L;
        final long tailId = stripe.tailId;
        final long tailPage = acquirePage(tailId);
        try {
            // Explicit check.
            long tailAddr = writeLockPage(tailId, tailPage, bucket, lockAttempt++, null);
            if (tailAddr == 0L)
                continue;
            if (stripe.empty) {
                // Another thread took the last page.
                writeUnlock(tailId, tailPage, tailAddr, false);
                if (bucketsSize[bucket].get() > 0) {
                    // Ignore current attempt.
                    lockAttempt--;
                    continue;
                } else
                    return 0L;
            }
            assert PageIO.getPageId(tailAddr) == tailId : "tailId = " + tailId + ", tailPageId = " + PageIO.getPageId(tailAddr);
            assert PageIO.getType(tailAddr) == PageIO.T_PAGE_LIST_NODE;
            boolean dirty = false;
            long dataPageId;
            long recycleId = 0L;
            try {
                PagesListNodeIO io = PagesListNodeIO.VERSIONS.forPage(tailAddr);
                if (io.getNextId(tailAddr) != 0) {
                    // It is not a tail anymore, retry.
                    continue;
                }
                long pageId = io.takeAnyPage(tailAddr);
                if (pageId != 0L) {
                    decrementBucketSize(bucket);
                    if (needWalDeltaRecord(tailId, tailPage, null))
                        wal.log(new PagesListRemovePageRecord(grpId, tailId, pageId));
                    dirty = true;
                    dataPageId = pageId;
                    if (io.isEmpty(tailAddr)) {
                        long prevId = io.getPreviousId(tailAddr);
                        // If the tail page became empty in a non-reuse bucket, unlink and recycle it
                        // to prevent empty page leak to data pages.
                        if (!isReuseBucket(bucket)) {
                            if (prevId != 0L) {
                                Boolean ok = write(prevId, cutTail, null, bucket, FALSE);
                                assert ok == TRUE : ok;
                                recycleId = recyclePage(tailId, tailPage, tailAddr, null);
                            } else
                                stripe.empty = true;
                        } else
                            stripe.empty = prevId == 0L;
                    }
                } else {
                    // The tail page is empty, but the stripe is not: there is
                    // a previous page, so, the current page can be collected.
                    assert isReuseBucket(bucket);
                    long prevId = io.getPreviousId(tailAddr);
                    // If we do not have a previous page (we are at head), then we still can return
                    // current page but we have to drop the whole stripe. Since it is a reuse bucket,
                    // we will not do that, but just return 0L, because this may produce contention on
                    // meta page.
                    assert prevId != 0L;
                    Boolean ok = write(prevId, cutTail, bucket, FALSE);
                    assert ok == TRUE : ok;
                    decrementBucketSize(bucket);
                    if (initIoVers != null) {
                        dataPageId = PageIdUtils.changeType(tailId, FLAG_DATA);
                        PageIO initIo = initIoVers.latest();
                        initIo.initNewPage(tailAddr, dataPageId, pageSize());
                        if (needWalDeltaRecord(tailId, tailPage, null)) {
                            wal.log(new InitNewPageRecord(grpId, tailId, initIo.getType(), initIo.getVersion(), dataPageId));
                        }
                    } else
                        dataPageId = recyclePage(tailId, tailPage, tailAddr, null);
                    dirty = true;
                }
            } finally {
                writeUnlock(tailId, tailPage, tailAddr, dirty);
            }
            // Put recycled page (if any) to the reuse bucket after tail is unlocked.
            if (recycleId != 0L) {
                assert !isReuseBucket(bucket);
                reuseList.addForRecycle(new SingletonReuseBag(recycleId));
            }
            return dataPageId;
        } finally {
            releasePage(tailId, tailPage);
        }
    }
}
Also used : InitNewPageRecord(org.apache.ignite.internal.pagemem.wal.record.delta.InitNewPageRecord) PagesListInitNewPageRecord(org.apache.ignite.internal.pagemem.wal.record.delta.PagesListInitNewPageRecord) PagesListNodeIO(org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListNodeIO) PagesListRemovePageRecord(org.apache.ignite.internal.pagemem.wal.record.delta.PagesListRemovePageRecord) PageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO) AbstractDataPageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.AbstractDataPageIO)
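
The retry loop above follows a fixed discipline: acquire the page, take the write lock, re-validate under the lock that the page is still the stripe tail, and undo everything in finally blocks on every exit path. Below is a minimal, self-contained sketch of that lock-and-revalidate pattern using a plain ReentrantLock and a hypothetical Stripe holder; the names are illustrative stand-ins, not Ignite's page-memory API.

import java.util.concurrent.locks.ReentrantLock;

class TakeTailSketch {
    /** Hypothetical stand-in for a PagesList stripe: a tail id and an empty flag guarded by a lock. */
    static class Stripe {
        final ReentrantLock lock = new ReentrantLock();
        long tailId;
        boolean empty;
    }

    /** Mirrors the shape of takeEmptyPage: try-lock, re-check state under the lock, retry on lock failure. */
    static long take(Stripe stripe, long expectedTailId) {
        for (;;) {
            if (!stripe.lock.tryLock())
                continue; // writeLockPage(...) returned 0: lock not taken, retry the attempt.

            try {
                // Explicit re-check: another thread may have emptied the stripe before we got the lock.
                if (stripe.empty || stripe.tailId != expectedTailId)
                    return 0L;

                long taken = stripe.tailId; // takeAnyPage(...) analogue for a single-slot stripe.
                stripe.empty = true;

                return taken;
            }
            finally {
                stripe.lock.unlock(); // writeUnlock(...) / releasePage(...) analogue.
            }
        }
    }
}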

Example 2 with PageIO

use of org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO in project ignite by apache.

the class CompressionProcessorImpl method doCompactPage.

/**
 * @param page Page buffer.
 * @param pageSize Page size.
 * @return Compacted page buffer.
 * @throws IgniteCheckedException If failed.
 */
private ByteBuffer doCompactPage(ByteBuffer page, int pageSize) throws IgniteCheckedException {
    PageIO io = PageIO.getPageIO(page);
    ByteBuffer compactPage = compactBuf.get();
    if (io instanceof CompactablePageIO) {
        // Drop the garbage from the page.
        ((CompactablePageIO) io).compactPage(page, compactPage, pageSize);
    } else {
        // Direct buffer is required as output of this method.
        if (page.isDirect())
            return page;
        PageUtils.putBytes(GridUnsafe.bufferAddress(compactPage), 0, page.array());
        compactPage.limit(pageSize);
    }
    return compactPage;
}
Also used : CompactablePageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.CompactablePageIO) ByteBuffer(java.nio.ByteBuffer) PageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO)
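
The else branch exists because the caller needs a direct buffer: a page that is already direct can be returned as-is, while a heap page has to be copied into the preallocated direct scratch buffer. Below is a minimal sketch of that branch using only standard java.nio (no GridUnsafe or PageUtils); it assumes the scratch buffer is already sized to the page, as the thread-local compactBuf is in the snippet above.

import java.nio.ByteBuffer;

class DirectOutputSketch {
    /** Return a direct buffer holding the page bytes, copying only when the input lives on the heap. */
    static ByteBuffer toDirect(ByteBuffer page, ByteBuffer directScratch, int pageSize) {
        if (page.isDirect())
            return page; // Already direct: hand it back untouched.

        directScratch.clear();
        directScratch.put(page.array(), page.arrayOffset(), pageSize); // Heap page: copy into the direct buffer.
        directScratch.flip(); // position = 0, limit = pageSize, like compactPage.limit(pageSize).

        return directScratch;
    }

    public static void main(String[] args) {
        ByteBuffer heapPage = ByteBuffer.allocate(4096);
        ByteBuffer scratch = ByteBuffer.allocateDirect(4096);

        System.out.println(toDirect(heapPage, scratch, 4096).isDirect()); // prints: true
    }
}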

Example 3 with PageIO

use of org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO in project ignite by apache.

the class InitNewPageRecord method applyDelta.

/**
 * {@inheritDoc}
 */
@Override
public void applyDelta(PageMemory pageMem, long pageAddr) throws IgniteCheckedException {
    PageIO io = PageIO.getPageIO(ioType, ioVer);
    PageMetrics metrics = pageMem.metrics().cacheGrpPageMetrics(groupId());
    io.initNewPage(pageAddr, newPageId, pageMem.realPageSize(groupId()), metrics);
}
Also used : PageMetrics(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMetrics) PageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO)
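
applyDelta is the replay half of the write-ahead logging shown here: the record stores only the ioType/ioVer pair and the new page id, and on recovery the concrete PageIO is resolved again and re-initializes the page in place. A schematic of that record-replay shape in plain Java is shown below; DeltaRecord and Page are hypothetical stand-ins for illustration, not Ignite's classes.

import java.util.Arrays;
import java.util.List;
import java.util.Map;

class DeltaReplaySketch {
    /** Hypothetical minimal page: an id plus a payload the records can mutate. */
    static class Page {
        long id;
        byte[] data = new byte[64];
    }

    /** Hypothetical delta record: knows how to re-apply one small change to one page. */
    interface DeltaRecord {
        long pageId();
        void applyDelta(Page page);
    }

    /** "Init new page" analogue: stamp the page id and reset the payload, like io.initNewPage(...). */
    static DeltaRecord initNewPage(long pageId) {
        return new DeltaRecord() {
            @Override public long pageId() { return pageId; }

            @Override public void applyDelta(Page page) {
                page.id = pageId;
                Arrays.fill(page.data, (byte)0);
            }
        };
    }

    /** Recovery loop: replay each logged record against its page, in WAL order. */
    static void replay(List<DeltaRecord> wal, Map<Long, Page> pages) {
        for (DeltaRecord rec : wal)
            rec.applyDelta(pages.computeIfAbsent(rec.pageId(), id -> new Page()));
    }
}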

Example 4 with PageIO

use of org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO in project ignite by apache.

the class PagesList method takeEmptyPage.

/**
 * @param bucket Bucket index.
 * @param initIoVers Optional IO to initialize page.
 * @param statHolder Statistics holder to track IO operations.
 * @return Removed page ID.
 * @throws IgniteCheckedException If failed.
 */
protected long takeEmptyPage(int bucket, @Nullable IOVersions initIoVers, IoStatisticsHolder statHolder) throws IgniteCheckedException {
    PagesCache pagesCache = getBucketCache(bucket, false);
    long pageId;
    if (pagesCache != null && (pageId = pagesCache.poll()) != 0L) {
        decrementBucketSize(bucket);
        if (log.isDebugEnabled()) {
            log.debug("Take page from pages list cache [list=" + name() + ", bucket=" + bucket + ", pageId=" + pageId + ']');
        }
        assert !isReuseBucket(bucket) : "reuse bucket detected";
        return pageId;
    }
    for (int lockAttempt = 0; ; ) {
        Stripe stripe = getPageForTake(bucket);
        if (stripe == null)
            return 0L;
        final long tailId = stripe.tailId;
        // Stripe was removed from bucket concurrently.
        if (tailId == 0L)
            continue;
        final long tailPage = acquirePage(tailId, statHolder);
        try {
            // Explicit check.
            long tailAddr = writeLockPage(tailId, tailPage, bucket, lockAttempt++, null);
            if (tailAddr == 0L)
                continue;
            if (stripe.empty || stripe.tailId != tailId) {
                // Another thread took the last page.
                writeUnlock(tailId, tailPage, tailAddr, false);
                if (bucketsSize.get(bucket) > 0) {
                    // Ignore current attempt.
                    lockAttempt--;
                    continue;
                } else
                    return 0L;
            }
            assert PageIO.getPageId(tailAddr) == tailId : "tailId = " + U.hexLong(tailId) + ", pageId = " + U.hexLong(PageIO.getPageId(tailAddr));
            assert PageIO.getType(tailAddr) == PageIO.T_PAGE_LIST_NODE : "tailId = " + U.hexLong(tailId) + ", type = " + PageIO.getType(tailAddr);
            boolean dirty = false;
            long dataPageId;
            long recycleId = 0L;
            try {
                PagesListNodeIO io = PagesListNodeIO.VERSIONS.forPage(tailAddr);
                if (io.getNextId(tailAddr) != 0) {
                    // It is not a tail anymore, retry.
                    continue;
                }
                pageId = io.takeAnyPage(tailAddr);
                if (pageId != 0L) {
                    decrementBucketSize(bucket);
                    if (needWalDeltaRecord(tailId, tailPage, null))
                        wal.log(new PagesListRemovePageRecord(grpId, tailId, pageId));
                    dirty = true;
                    if (isReuseBucket(bucket) && !(PageIdUtils.itemId(pageId) > 0 && PageIdUtils.itemId(pageId) <= MAX_ITEMID_NUM))
                        throw corruptedFreeListException("Incorrectly recycled pageId in reuse bucket: " + U.hexLong(pageId), pageId);
                    if (isReuseBucket(bucket)) {
                        byte flag = getFlag(initIoVers);
                        PageIO initIO = initIoVers == null ? null : initIoVers.latest();
                        dataPageId = initRecycledPage0(pageId, flag, initIO);
                    } else
                        dataPageId = pageId;
                    if (io.isEmpty(tailAddr)) {
                        long prevId = io.getPreviousId(tailAddr);
                        // If the tail page became empty in a non-reuse bucket, unlink and recycle it
                        // to prevent empty page leak to data pages.
                        if (!isReuseBucket(bucket)) {
                            if (prevId != 0L) {
                                Boolean ok = write(prevId, cutTail, null, bucket, FALSE, statHolder);
                                assert ok == TRUE : ok;
                                recycleId = recyclePage(tailId, tailPage, tailAddr, null);
                            } else
                                stripe.empty = true;
                        } else
                            stripe.empty = prevId == 0L;
                    }
                } else {
                    // The tail page is empty, but the stripe is not: there is
                    // a previous page, so, the current page can be collected.
                    assert isReuseBucket(bucket);
                    long prevId = io.getPreviousId(tailAddr);
                    // If we do not have a previous page (we are at head), then we still can return
                    // current page but we have to drop the whole stripe. Since it is a reuse bucket,
                    // we will not do that, but just return 0L, because this may produce contention on
                    // meta page.
                    assert prevId != 0L;
                    Boolean ok = write(prevId, cutTail, bucket, FALSE, statHolder);
                    assert ok == TRUE : ok;
                    decrementBucketSize(bucket);
                    byte flag = getFlag(initIoVers);
                    PageIO pageIO = initIoVers != null ? initIoVers.latest() : null;
                    dataPageId = initReusedPage(tailId, tailPage, tailAddr, PageIdUtils.partId(tailId), flag, pageIO);
                    dirty = true;
                }
            } finally {
                writeUnlock(tailId, tailPage, tailAddr, dirty);
            }
            // Put recycled page (if any) to the reuse bucket after tail is unlocked.
            if (recycleId != 0L) {
                assert !isReuseBucket(bucket);
                reuseList.addForRecycle(new SingletonReuseBag(recycleId));
            }
            if (log.isDebugEnabled()) {
                log.debug("Take page from pages list [list=" + name() + ", bucket=" + bucket + ", dataPageId=" + dataPageId + ", tailId=" + tailId + ']');
            }
            return dataPageId;
        } finally {
            releasePage(tailId, tailPage);
        }
    }
}
Also used : PagesListNodeIO(org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListNodeIO) PagesListRemovePageRecord(org.apache.ignite.internal.pagemem.wal.record.delta.PagesListRemovePageRecord) PageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO) AbstractDataPageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.AbstractDataPageIO)
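
Compared with Example 1, this newer version of takeEmptyPage adds an onheap pages-list cache per bucket: poll the cache first and only fall back to the locked stripe walk on a miss. A minimal sketch of that fast-path/slow-path split is below, with a ConcurrentLinkedQueue standing in for PagesCache; the names are illustrative, not the Ignite implementation.

import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.function.LongSupplier;

class BucketCacheSketch {
    /** Stand-in for PagesCache: recently freed page ids kept on heap for cheap reuse. */
    private final ConcurrentLinkedQueue<Long> cache = new ConcurrentLinkedQueue<>();

    /** Fast path: take a cached id; slow path: fall back to the expensive page-level walk. */
    long takeEmptyPage(LongSupplier slowPath) {
        Long cached = cache.poll(); // Analogue of pagesCache.poll() in the snippet above.

        if (cached != null)
            return cached; // Cache hit: no page locks or WAL records involved.

        return slowPath.getAsLong(); // Cache miss: run the locked stripe walk (the for-loop above).
    }

    /** On free: try to keep the page id on heap instead of linking it back into the list pages. */
    void release(long pageId) {
        cache.offer(pageId);
    }
}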

Example 5 with PageIO

use of org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO in project ignite by apache.

the class IgnitePdsTaskCancelingTest method testFilePageStoreInterruptThreads.

/**
 * Test FilePageStore with multiple interrupted threads.
 */
@Test
public void testFilePageStoreInterruptThreads() throws Exception {
    failure.set(false);
    FileIOFactory factory = new RandomAccessFileIOFactory();
    File file = new File(U.defaultWorkDirectory(), "file.bin");
    file.deleteOnExit();
    DataStorageConfiguration dbCfg = getDataStorageConfiguration();
    FilePageStore pageStore = new FilePageStore(PageMemory.FLAG_DATA, file::toPath, factory, dbCfg.getPageSize(), val -> {
    });
    int pageSize = dbCfg.getPageSize();
    PageIO pageIO = PageIO.getPageIO(PageIO.T_DATA, 1);
    long ptr = GridUnsafe.allocateMemory(NUM_TASKS * pageSize);
    try {
        List<Thread> threadList = new ArrayList<>(NUM_TASKS);
        AtomicBoolean stopThreads = new AtomicBoolean(false);
        for (int i = 0; i < NUM_TASKS; i++) {
            long pageId = PageIdUtils.pageId(0, PageMemory.FLAG_DATA, (int) pageStore.allocatePage());
            long pageAdr = ptr + i * pageSize;
            pageIO.initNewPage(pageAdr, pageId, pageSize, null);
            ByteBuffer buf = GridUnsafe.wrapPointer(pageAdr, pageSize);
            pageStore.write(pageId, buf, 0, true);
            threadList.add(new Thread(new Runnable() {

                @Override
                public void run() {
                    Random random = new Random();
                    while (!stopThreads.get()) {
                        buf.position(0);
                        try {
                            if (random.nextBoolean()) {
                                log.info(">>> Read page " + U.hexLong(pageId));
                                pageStore.read(pageId, buf, false);
                            } else {
                                log.info(">>> Write page " + U.hexLong(pageId));
                                pageStore.write(pageId, buf, 0, true);
                            }
                            Thread.interrupted();
                        } catch (Exception e) {
                            log.error("Error while reading/writing page", e);
                            failure.set(true);
                        }
                    }
                }
            }));
        }
        for (Thread thread : threadList) thread.start();
        for (int i = 0; i < 10; i++) {
            for (Thread thread : threadList) {
                doSleep(10L);
                log.info("Interrupting " + thread.getName());
                thread.interrupt();
            }
        }
        stopThreads.set(true);
        for (Thread thread : threadList) thread.join();
        assertFalse(failure.get());
    } finally {
        GridUnsafe.freeMemory(ptr);
    }
}
Also used : FileIOFactory(org.apache.ignite.internal.processors.cache.persistence.file.FileIOFactory) RandomAccessFileIOFactory(org.apache.ignite.internal.processors.cache.persistence.file.RandomAccessFileIOFactory) ArrayList(java.util.ArrayList) FilePageStore(org.apache.ignite.internal.processors.cache.persistence.file.FilePageStore) ByteBuffer(java.nio.ByteBuffer) IOException(java.io.IOException) DataStorageConfiguration(org.apache.ignite.configuration.DataStorageConfiguration) AtomicBoolean(java.util.concurrent.atomic.AtomicBoolean) Random(java.util.Random) IgniteRunnable(org.apache.ignite.lang.IgniteRunnable) File(java.io.File) PageIO(org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) Test(org.junit.Test)
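
The test keeps interrupting its worker threads because plain FileChannel I/O is interrupt-sensitive: per the InterruptibleChannel contract, interrupting a thread that is blocked in (or enters) a channel operation closes the channel and raises ClosedByInterruptException. The snippet below reproduces that underlying hazard with standard NIO only; it shows the failure mode the page store has to survive, and is not Ignite code.

import java.nio.ByteBuffer;
import java.nio.channels.ClosedByInterruptException;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

class InterruptedIoSketch {
    public static void main(String[] args) throws Exception {
        Path file = Files.createTempFile("interrupt-io", ".bin");

        try (FileChannel ch = FileChannel.open(file, StandardOpenOption.WRITE)) {
            // An already-set interrupt status is enough: the next channel operation fails and closes the channel.
            Thread.currentThread().interrupt();

            try {
                ch.write(ByteBuffer.wrap(new byte[64]), 0);
            }
            catch (ClosedByInterruptException e) {
                System.out.println("Channel closed by interrupt: " + e);
            }
            finally {
                Thread.interrupted(); // Clear the flag, as the worker loop in the test does.
            }
        }
        finally {
            Files.deleteIfExists(file);
        }
    }
}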

Aggregations

PageIO (org.apache.ignite.internal.processors.cache.persistence.tree.io.PageIO)15 ByteBuffer (java.nio.ByteBuffer)9 AbstractDataPageIO (org.apache.ignite.internal.processors.cache.persistence.tree.io.AbstractDataPageIO)6 ArrayList (java.util.ArrayList)5 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)4 IgniteException (org.apache.ignite.IgniteException)4 PagesListNodeIO (org.apache.ignite.internal.processors.cache.persistence.freelist.io.PagesListNodeIO)4 IOException (java.io.IOException)3 AtomicBoolean (java.util.concurrent.atomic.AtomicBoolean)3 CompactablePageIO (org.apache.ignite.internal.processors.cache.persistence.tree.io.CompactablePageIO)3 File (java.io.File)2 HashMap (java.util.HashMap)2 HashSet (java.util.HashSet)2 LinkedList (java.util.LinkedList)2 List (java.util.List)2 Map (java.util.Map)2 Set (java.util.Set)2 AtomicLong (java.util.concurrent.atomic.AtomicLong)2 AtomicReference (java.util.concurrent.atomic.AtomicReference)2 IgniteInternalFuture (org.apache.ignite.internal.IgniteInternalFuture)2