Example 1 with SegmentAware

Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in project ignite by apache.

From the class FileWriteAheadLogManager, method start0().

/**
 * {@inheritDoc}
 */
@Override
public void start0() throws IgniteCheckedException {
    if (cctx.kernalContext().clientNode())
        return;
    final PdsFolderSettings resolveFolders = cctx.kernalContext().pdsFolderResolver().resolveFolders();
    checkWalConfiguration();
    synchronized (this) {
        final File walWorkDir0 = walWorkDir = initDirectory(dsCfg.getWalPath(), DataStorageConfiguration.DFLT_WAL_PATH, resolveFolders.folderName(), "write ahead log work directory");
        final File walArchiveDir0 = walArchiveDir = initDirectory(dsCfg.getWalArchivePath(), DataStorageConfiguration.DFLT_WAL_ARCHIVE_PATH, resolveFolders.folderName(), "write ahead log archive directory");
        if (dsCfg.isCdcEnabled()) {
            walCdcDir = initDirectory(dsCfg.getCdcWalPath(), DataStorageConfiguration.DFLT_WAL_CDC_PATH, resolveFolders.folderName(), "change data capture directory");
        }
        serializer = new RecordSerializerFactoryImpl(cctx).createSerializer(serializerVer);
        GridCacheDatabaseSharedManager dbMgr = (GridCacheDatabaseSharedManager) cctx.database();
        metrics = dbMgr.persistentStoreMetricsImpl();
        if (metrics != null) {
            metrics.setWalSizeProvider(new CO<Long>() {

                /**
                 * {@inheritDoc}
                 */
                @Override
                public Long apply() {
                    long size = 0;
                    for (File f : walWorkDir0.listFiles()) size += f.length();
                    if (isArchiverEnabled()) {
                        for (File f : walArchiveDir0.listFiles()) size += f.length();
                    }
                    return size;
                }
            });
        }
        segmentAware = new SegmentAware(log, dsCfg.getWalSegments(), dsCfg.isWalCompactionEnabled(), minWalArchiveSize, maxWalArchiveSize);
        // The compressor must be created before the archiver; otherwise, FileArchiver initialization will trigger redundant work for FileCompressor.
        if (dsCfg.isWalCompactionEnabled()) {
            compressor = new FileCompressor(log);
            decompressor = new FileDecompressor(log);
        }
        if (isArchiverEnabled())
            archiver = new FileArchiver(log);
        if (!walArchiveUnlimited())
            cleaner = new FileCleaner(log);
        prepareAndCheckWalFiles();
        if (compressor != null)
            compressor.initAlreadyCompressedSegments();
        if (archiver != null)
            archiver.init(segmentAware);
        segmentRouter = new SegmentRouter(walWorkDir, walArchiveDir, segmentAware, dsCfg);
        fileHandleManager = fileHandleManagerFactory.build(cctx, metrics, mmap, serializer, this::currentHandle);
        lockedSegmentFileInputFactory = new LockedSegmentFileInputFactory(segmentAware, segmentRouter, ioFactory);
        pageCompression = dsCfg.getWalPageCompression();
        if (pageCompression != DiskPageCompression.DISABLED) {
            if (serializerVer < 2) {
                throw new IgniteCheckedException("WAL page snapshots compression not supported for serializerVer=" + serializerVer);
            }
            cctx.kernalContext().compress().checkPageCompressionSupported();
            pageCompressionLevel = dsCfg.getWalPageCompressionLevel() != null ? checkCompressionLevelBounds(dsCfg.getWalPageCompressionLevel(), pageCompression) : getDefaultCompressionLevel(pageCompression);
        }
    }
}
Also used : GridCacheDatabaseSharedManager(org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager) LockedSegmentFileInputFactory(org.apache.ignite.internal.processors.cache.persistence.wal.io.LockedSegmentFileInputFactory) SegmentAware(org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) RecordSerializerFactoryImpl(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializerFactoryImpl) AtomicLong(java.util.concurrent.atomic.AtomicLong) PdsFolderSettings(org.apache.ignite.internal.processors.cache.persistence.filename.PdsFolderSettings) File(java.io.File)
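For orientation, the SegmentAware constructor arguments and the compressor/CDC/page-compression branches in start0() are all driven by the node's DataStorageConfiguration. Below is a minimal configuration sketch, assuming the standard public setters; it is not part of the listing above, and the concrete values are placeholders only.

import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.DiskPageCompression;
import org.apache.ignite.configuration.IgniteConfiguration;

/** Minimal configuration sketch; values are illustrative only. */
private IgniteConfiguration walConfiguration() {
    return new IgniteConfiguration().setDataStorageConfiguration(
        new DataStorageConfiguration()
            // Number of work segments handed to the SegmentAware constructor.
            .setWalSegments(10)
            // Enables creation of FileCompressor/FileDecompressor in start0().
            .setWalCompactionEnabled(true)
            // WAL page snapshot compression; start0() rejects it for serializer versions below 2.
            .setWalPageCompression(DiskPageCompression.LZ4)
            // Persistence (and, if needed, CDC) is configured per data region.
            .setDefaultDataRegionConfiguration(new DataRegionConfiguration()
                .setPersistenceEnabled(true)
                .setCdcEnabled(true)));
}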

Example 2 with SegmentAware

Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in project ignite by apache.

From the class IgnitePdsStartWIthEmptyArchive, method test().

/**
 * @throws Exception If failed.
 */
@Test
public void test() throws Exception {
    IgniteEx ig = startGrid(0);
    ig.cluster().active(true);
    FileWriteAheadLogManager walMgr = (FileWriteAheadLogManager) ig.context().cache().context().wal();
    // Populate data to generate WAL archive segments.
    try (IgniteDataStreamer<Integer, byte[]> st = ig.dataStreamer(DEFAULT_CACHE_NAME)) {
        int entries = 1000;
        for (int i = 0; i < entries; i++) st.addData(i, new byte[1024 * 1024]);
    }
    File archiveDir = U.field(walMgr, "walArchiveDir");
    stopGrid(0, false);
    SegmentAware beforeSaw = U.field(walMgr, "segmentAware");
    long beforeLastArchivedAbsoluteIdx = beforeSaw.lastArchivedAbsoluteIndex();
    FileWriteHandle fhBefore = U.field(walMgr, "currHnd");
    long idxBefore = fhBefore.getSegmentId();
    File[] files = archiveDir.listFiles(WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER);
    Arrays.sort(files);
    // Cleanup archive directory.
    for (File f : files) {
        if (f.delete())
            log.info("File " + f.getAbsolutePath() + " deleted");
    }
    Assert.assertEquals(0, archiveDir.listFiles().length);
    evts.clear();
    // Restart the grid after the archive has been removed.
    ig = startGrid(0);
    walMgr = (FileWriteAheadLogManager) ig.context().cache().context().wal();
    SegmentAware afterSaw = U.field(walMgr, "segmentAware");
    long afterLastArchivedAbsoluteIndex = afterSaw.lastArchivedAbsoluteIndex();
    int segments = ig.configuration().getDataStorageConfiguration().getWalSegments();
    Assert.assertTrue("lastArchivedBeforeIdx=" + beforeLastArchivedAbsoluteIdx + ", lastArchivedAfterIdx=" + afterLastArchivedAbsoluteIndex + ",  segments=" + segments, afterLastArchivedAbsoluteIndex >= (beforeLastArchivedAbsoluteIdx - segments));
    ig.cluster().active(true);
    FileWriteHandle fhAfter = U.field(walMgr, "currHnd");
    Assert.assertNotNull(fhAfter);
    long idxAfter = fhAfter.getSegmentId();
    Assert.assertEquals(idxBefore, idxAfter);
    Assert.assertTrue(idxAfter >= beforeLastArchivedAbsoluteIdx);
    log.info("currentIdx=" + idxAfter + ", lastArchivedBeforeIdx=" + beforeLastArchivedAbsoluteIdx + ", lastArchivedAfteridx=" + afterLastArchivedAbsoluteIndex + ",  segments=" + segments);
    // One is the last archived segment, the second is the current write segment.
    final long awaitArchivedSegments = idxAfter - afterLastArchivedAbsoluteIndex - 2;
    // Await until all currently available segments are archived.
    assertTrue(GridTestUtils.waitForCondition(new GridAbsPredicate() {

        @Override
        public boolean apply() {
            long cut = evts.keySet().stream().filter(e -> e > afterLastArchivedAbsoluteIndex).count();
            return cut >= awaitArchivedSegments;
        }
    }, 10_000));
}
Also used : FileWriteAheadLogManager(org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager) Arrays(java.util.Arrays) U(org.apache.ignite.internal.util.typedef.internal.U) EVT_WAL_SEGMENT_ARCHIVED(org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED) HashMap(java.util.HashMap) IgniteEx(org.apache.ignite.internal.IgniteEx) FileWriteHandle(org.apache.ignite.internal.processors.cache.persistence.wal.filehandle.FileWriteHandle) SegmentAware(org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) IgnitePredicate(org.apache.ignite.lang.IgnitePredicate) Map(java.util.Map) GridAbsPredicate(org.apache.ignite.internal.util.lang.GridAbsPredicate) After(org.junit.After) DataStorageConfiguration(org.apache.ignite.configuration.DataStorageConfiguration) Before(org.junit.Before) GridCommonAbstractTest(org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER(org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager.WAL_SEGMENT_COMPACTED_OR_RAW_FILE_FILTER) Event(org.apache.ignite.events.Event) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) Test(org.junit.Test) File(java.io.File) GridTestUtils(org.apache.ignite.testframework.GridTestUtils) IgniteConfiguration(org.apache.ignite.configuration.IgniteConfiguration) Ignore(org.junit.Ignore) CacheConfiguration(org.apache.ignite.configuration.CacheConfiguration) IgniteDataStreamer(org.apache.ignite.IgniteDataStreamer) WalSegmentArchivedEvent(org.apache.ignite.events.WalSegmentArchivedEvent) Assert(org.junit.Assert) DataRegionConfiguration(org.apache.ignite.configuration.DataRegionConfiguration)
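The evts map consulted in the final await condition is populated by an event listener defined elsewhere in the test class and not shown on this page. A sketch of what such a listener might look like, keyed by absolute segment index, is given below; the field name mirrors the test, but the registration point and listener body are assumptions inferred from the imports listed above.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import org.apache.ignite.events.Event;
import org.apache.ignite.events.WalSegmentArchivedEvent;
import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.lang.IgnitePredicate;
import static org.apache.ignite.events.EventType.EVT_WAL_SEGMENT_ARCHIVED;

/** Archived segment index -> event (mirrors the evts field used by the test). */
private final Map<Long, WalSegmentArchivedEvent> evts = new ConcurrentHashMap<>();

/** Registers a local listener; EVT_WAL_SEGMENT_ARCHIVED is assumed to be in setIncludeEventTypes(). */
private void listenArchiveEvents(IgniteEx ig) {
    IgnitePredicate<Event> lsnr = e -> {
        WalSegmentArchivedEvent archEvt = (WalSegmentArchivedEvent)e;

        evts.put(archEvt.getAbsWalSegmentIdx(), archEvt);

        return true; // Keep listening.
    };

    ig.events().localListen(lsnr, EVT_WAL_SEGMENT_ARCHIVED);
}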

Example 3 with SegmentAware

Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in project ignite by apache.

From the class ReleaseSegmentOnHistoricalRebalanceTest, method testNoReserveSegment().

/**
 * Checks that if there is no reservation,
 * there will be no errors and the rebalance will be completed.
 *
 * @throws Exception If failed.
 */
@Test
public void testNoReserveSegment() throws Exception {
    checkHistoricalRebalance(n -> {
        SegmentAware spy = spy(segmentAware(n));
        when(spy.reserve(anyLong())).thenAnswer(m -> false);
        segmentAware(n, spy);
    });
}
Also used : SegmentAware(org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) Test(org.junit.Test)
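The segmentAware(n) getter and segmentAware(n, spy) setter used here are helpers declared elsewhere in the test class and are not shown on this page. A plausible sketch, based on the reflective field access seen in the other examples, follows; treat the bodies as assumptions about their shape, not the actual implementation.

import org.apache.ignite.internal.IgniteEx;
import org.apache.ignite.internal.processors.cache.persistence.wal.FileWriteAheadLogManager;
import org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware;
import org.apache.ignite.testframework.GridTestUtils;

/** Assumed helper: node's FileWriteAheadLogManager. */
private FileWriteAheadLogManager walMgr(IgniteEx n) {
    return (FileWriteAheadLogManager)n.context().cache().context().wal();
}

/** Assumed helper: reads the private segmentAware field of the WAL manager. */
private SegmentAware segmentAware(IgniteEx n) {
    return GridTestUtils.getFieldValue(walMgr(n), "segmentAware");
}

/** Assumed helper: replaces the private segmentAware field with a Mockito spy. */
private void segmentAware(IgniteEx n, SegmentAware spy) {
    GridTestUtils.setFieldValue(walMgr(n), "segmentAware", spy);
}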

Example 4 with SegmentAware

Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in project ignite by apache.

From the class ReleaseSegmentOnHistoricalRebalanceTest, method testImmediateReleaseSegment().

/**
 * Checks that if the reservation is released immediately,
 * there will be no errors and the rebalance will be completed.
 *
 * @throws Exception If failed.
 */
@Test
public void testImmediateReleaseSegment() throws Exception {
    checkHistoricalRebalance(n -> {
        SegmentAware spy = spy(segmentAware(n));
        doAnswer(m -> {
            Object o = m.callRealMethod();
            spy.release(m.getArgument(0));
            return o;
        }).when(spy).reserve(anyLong());
        segmentAware(n, spy);
    });
}
Also used : SegmentAware(org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) Test(org.junit.Test)
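Both of these spy-based tests exercise the same reserve/release pair on SegmentAware. A minimal sketch of that contract, using only the methods visible on this page (and the assumed segmentAware(n) helper sketched under Example 3), is shown below:

// Minimal sketch of the reservation contract the spies above manipulate.
SegmentAware segmentAware = segmentAware(grid(0)); // Via the assumed helper above.
long absIdx = segmentAware.lastArchivedAbsoluteIndex();

if (segmentAware.reserve(absIdx)) {
    try {
        // While reserved, the segment must stay available to readers,
        // e.g. for historical rebalance.
    }
    finally {
        // testImmediateReleaseSegment releases right after reserving,
        // which is exactly what the doAnswer() stub above simulates.
        segmentAware.release(absIdx);
    }
}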

Example 5 with SegmentAware

Use of org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware in project ignite by apache.

From the class IgniteWalIteratorSwitchSegmentTest, method checkSwitchReadingSegmentDuringIteration().

/**
 * @param serVer WAL serializer version.
 * @throws Exception If something failed.
 */
private void checkSwitchReadingSegmentDuringIteration(int serVer) throws Exception {
    String workDir = U.defaultWorkDirectory();
    T2<IgniteWriteAheadLogManager, RecordSerializer> initTup = initiate(serVer, workDir);
    IgniteWriteAheadLogManager walMgr = initTup.get1();
    RecordSerializer recordSerializer = initTup.get2();
    MetastoreDataRecord rec = new MetastoreDataRecord("0", new byte[100]);
    int recSize = recordSerializer.size(rec);
    // Add enough records to roll over to the next segment.
    int recordsToWrite = SEGMENT_SIZE / recSize + 100;
    SegmentAware segmentAware = GridTestUtils.getFieldValue(walMgr, "segmentAware");
    // Guard against archiving before the iterator is created.
    assertTrue(segmentAware.lock(0));
    for (int i = 0; i < recordsToWrite; i++) walMgr.log(new MetastoreDataRecord(rec.key(), rec.value()));
    walMgr.flush(null, true);
    AtomicInteger actualRecords = new AtomicInteger(0);
    AtomicReference<String> startedSegmentPath = new AtomicReference<>();
    AtomicReference<String> finishedSegmentPath = new AtomicReference<>();
    CountDownLatch startedIterLatch = new CountDownLatch(1);
    CountDownLatch finishedArchivedLatch = new CountDownLatch(1);
    IgniteInternalFuture<?> fut = GridTestUtils.runAsync(() -> {
        // Check that switching the segment works as expected and all records are reachable.
        try (WALIterator it = walMgr.replay(null)) {
            Object handle = getFieldValueHierarchy(it, "currWalSegment");
            FileInput in = getFieldValueHierarchy(handle, "in");
            Object delegate = getFieldValueHierarchy(in.io(), "delegate");
            Channel ch = getFieldValueHierarchy(delegate, "ch");
            String path = getFieldValueHierarchy(ch, "path");
            startedSegmentPath.set(path);
            startedIterLatch.countDown();
            while (it.hasNext()) {
                IgniteBiTuple<WALPointer, WALRecord> tup = it.next();
                WALRecord rec0 = tup.get2();
                if (rec0.type() == METASTORE_DATA_RECORD)
                    actualRecords.incrementAndGet();
                finishedArchivedLatch.await();
            }
            in = getFieldValueHierarchy(handle, "in");
            delegate = getFieldValueHierarchy(in.io(), "delegate");
            ch = getFieldValueHierarchy(delegate, "ch");
            path = getFieldValueHierarchy(ch, "path");
            finishedSegmentPath.set(path);
        }
        return null;
    });
    startedIterLatch.await();
    segmentAware.unlock(0);
    waitForCondition(() -> segmentAware.lastArchivedAbsoluteIndex() == 0, 5000);
    finishedArchivedLatch.countDown();
    fut.get();
    // Iteration should start in the work directory but finish in the archive directory.
    assertEquals(workDir + WORK_SUB_DIR + File.separator + "0000000000000000.wal", startedSegmentPath.get());
    assertEquals(workDir + ARCHIVE_SUB_DIR + File.separator + "0000000000000000.wal", finishedSegmentPath.get());
    Assert.assertEquals("Not all records read during iteration.", recordsToWrite, actualRecords.get());
}
Also used : WALRecord(org.apache.ignite.internal.pagemem.wal.record.WALRecord) IgniteWriteAheadLogManager(org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager) Channel(java.nio.channels.Channel) AtomicReference(java.util.concurrent.atomic.AtomicReference) FileInput(org.apache.ignite.internal.processors.cache.persistence.wal.io.FileInput) CountDownLatch(java.util.concurrent.CountDownLatch) SegmentAware(org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) WALIterator(org.apache.ignite.internal.pagemem.wal.WALIterator) MetastoreDataRecord(org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord) WALPointer(org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer) RecordSerializer(org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer)
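Stripped of the reflection that inspects the underlying file channel, the core of the iteration above is a plain WAL replay loop. A condensed sketch using the same public APIs (record counting only, no channel-path checks) might look like this:

import org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager;
import org.apache.ignite.internal.pagemem.wal.WALIterator;
import org.apache.ignite.internal.pagemem.wal.record.WALRecord;
import org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer;
import org.apache.ignite.lang.IgniteBiTuple;
import static org.apache.ignite.internal.pagemem.wal.record.WALRecord.RecordType.METASTORE_DATA_RECORD;

/** Counts metastore records by replaying the WAL from the beginning. */
private int countMetastoreRecords(IgniteWriteAheadLogManager walMgr) throws Exception {
    int cnt = 0;

    try (WALIterator it = walMgr.replay(null)) {
        while (it.hasNext()) {
            IgniteBiTuple<WALPointer, WALRecord> tup = it.next();

            if (tup.get2().type() == METASTORE_DATA_RECORD)
                cnt++;
        }
    }

    return cnt;
}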

Aggregations

SegmentAware (org.apache.ignite.internal.processors.cache.persistence.wal.aware.SegmentAware) 6
File (java.io.File) 3
Test (org.junit.Test) 3
IgniteCheckedException (org.apache.ignite.IgniteCheckedException) 2
IgniteWriteAheadLogManager (org.apache.ignite.internal.pagemem.wal.IgniteWriteAheadLogManager) 2
WALIterator (org.apache.ignite.internal.pagemem.wal.WALIterator) 2
MetastoreDataRecord (org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord) 2
WALRecord (org.apache.ignite.internal.pagemem.wal.record.WALRecord) 2
WALPointer (org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer) 2
RecordSerializer (org.apache.ignite.internal.processors.cache.persistence.wal.serializer.RecordSerializer) 2
Channel (java.nio.channels.Channel) 1
Arrays (java.util.Arrays) 1
HashMap (java.util.HashMap) 1
Map (java.util.Map) 1
Random (java.util.Random) 1
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap) 1
CountDownLatch (java.util.concurrent.CountDownLatch) 1
AtomicInteger (java.util.concurrent.atomic.AtomicInteger) 1
AtomicLong (java.util.concurrent.atomic.AtomicLong) 1
AtomicReference (java.util.concurrent.atomic.AtomicReference) 1